Networking fixes for 5.18-rc5, including fixes from bluetooth, bpf
and netfilter.
 
 Current release - new code bugs:
 
  - bridge: switchdev: check br_vlan_group() return value
 
  - use this_cpu_inc() to increment net->core_stats, fix preempt-rt
 
 Previous releases - regressions:
 
  - eth: stmmac: fix write to sgmii_adapter_base
 
 Previous releases - always broken:
 
  - netfilter: nf_conntrack_tcp: re-init for syn packets only,
    resolving issues with TCP fastopen
 
  - tcp: md5: fix incorrect tcp_header_len for incoming connections
 
  - tcp: fix F-RTO may not work correctly when receiving DSACK
 
  - tcp: ensure use of most recently sent skb when filling rate samples
 
  - tcp: fix potential xmit stalls caused by TCP_NOTSENT_LOWAT
 
  - virtio_net: fix wrong buf address calculation when using xdp
 
  - xsk: fix forwarding when combining copy mode with busy poll
 
  - xsk: fix possible crash when multiple sockets are created
 
  - bpf: lwt: fix crash when using bpf_skb_set_tunnel_key() from
    bpf_xmit lwt hook
 
  - sctp: null-check asoc strreset_chunk in sctp_generate_reconf_event
 
  - wireguard: device: check for metadata_dst with skb_valid_dst()
 
  - netfilter: update ip6_route_me_harder to consider L3 domain
 
  - gre: make o_seqno start from 0 in native mode
 
  - gre: switch o_seqno to atomic to prevent races in collect_md mode
 
 Misc:
 
  - add Eric Dumazet to networking maintainers
 
  - dt: dsa: realtek: remove realtek,rtl8367s string
 
  - netfilter: flowtable: Remove the empty file
 
 Signed-off-by: Jakub Kicinski <kuba@kernel.org>
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEE6jPA+I1ugmIBA4hXMUZtbf5SIrsFAmJq2r4ACgkQMUZtbf5S
 IrthIxAAjGEcLr25lkB0IWcjOD5wqOuhaRKeSWXnbm5bPkWIaxVMsssBAR8DS78S
 bsaJ0yTKQqv4vLtlMjtQpVC/azr0NTmsb0y5+6C5d4IObBf2Mv1LPpkiqs0d+phQ
 khPsCh0QGtSJT9VbaMu5+JW+c6Jo0kVmnmOgmhZMo1QKFw/bocdJrxQoZjcve9/X
 /uTDFEn8dPWbQKOm1TmSvQhuEPh1V6ZAf8/cBikN2Yul1R0EYbZO6RKSfrgqaa7T
 aRMMTEwRoTEuRdjF97F7ZgGXhoRxP9rW+bdzoQ0ewRu+KgKKPjCL2eVgeNfTNptj
 FjaUpNrImkYUxQ8+x7sQVPdwFaScVVtjHFfqFl0CsvhddT6nQw2trElccfy3/16K
 0GEKBXKCB3B9h02fhillPFveZzDChy/5NTezARqMYP0eG5SBUHLCCymZnqnoNkwV
 hdcmciZTnJzIxPJcmlp8F7D5etueDOwh03nMcFxRf3eW0IcC+Hl6qtbOFUzr6KhB
 V/TLh7N+Smy3JtsavU9aj4iSQGR+kChCt5zhH9idkuEsUgdYo3apB4q3k3ShzWqM
 SJx4gxp5HxwobLx+uW/HJMJTmwA8fApMbIl0OOPm+qiHfULufCZb6BS3AZGb7jdX
 wrcEPeZxBTCZqHeQc0kuo9/CtTTvbawJYtP5LRiZ5bWfoKn5F9o=
 =XOSt
 -----END PGP SIGNATURE-----

Merge tag 'net-5.18-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Including fixes from bluetooth, bpf and netfilter.

  Current release - new code bugs:

   - bridge: switchdev: check br_vlan_group() return value

   - use this_cpu_inc() to increment net->core_stats, fix preempt-rt

  Previous releases - regressions:

   - eth: stmmac: fix write to sgmii_adapter_base

  Previous releases - always broken:

   - netfilter: nf_conntrack_tcp: re-init for syn packets only,
     resolving issues with TCP fastopen

   - tcp: md5: fix incorrect tcp_header_len for incoming connections

   - tcp: fix F-RTO may not work correctly when receiving DSACK

   - tcp: ensure use of most recently sent skb when filling rate samples

   - tcp: fix potential xmit stalls caused by TCP_NOTSENT_LOWAT

   - virtio_net: fix wrong buf address calculation when using xdp

   - xsk: fix forwarding when combining copy mode with busy poll

   - xsk: fix possible crash when multiple sockets are created

   - bpf: lwt: fix crash when using bpf_skb_set_tunnel_key() from
     bpf_xmit lwt hook

   - sctp: null-check asoc strreset_chunk in sctp_generate_reconf_event

   - wireguard: device: check for metadata_dst with skb_valid_dst()

   - netfilter: update ip6_route_me_harder to consider L3 domain

   - gre: make o_seqno start from 0 in native mode

   - gre: switch o_seqno to atomic to prevent races in collect_md mode

  Misc:

   - add Eric Dumazet to networking maintainers

   - dt: dsa: realtek: remove realtek,rtl8367s string

   - netfilter: flowtable: Remove the empty file"

* tag 'net-5.18-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (65 commits)
  tcp: fix F-RTO may not work correctly when receiving DSACK
  Revert "ibmvnic: Add ethtool private flag for driver-defined queue limits"
  net: enetc: allow tc-etf offload even with NETIF_F_CSUM_MASK
  ixgbe: ensure IPsec VF<->PF compatibility
  MAINTAINERS: Update BNXT entry with firmware files
  netfilter: nft_socket: only do sk lookups when indev is available
  net: fec: add missing of_node_put() in fec_enet_init_stop_mode()
  bnx2x: fix napi API usage sequence
  tls: Skip tls_append_frag on zero copy size
  Add Eric Dumazet to networking maintainers
  netfilter: conntrack: fix udp offload timeout sysctl
  netfilter: nf_conntrack_tcp: re-init for syn packets only
  net: dsa: lantiq_gswip: Don't set GSWIP_MII_CFG_RMII_CLK
  net: Use this_cpu_inc() to increment net->core_stats
  Bluetooth: hci_sync: Cleanup hci_conn if it cannot be aborted
  Bluetooth: hci_event: Fix creating hci_conn object on error status
  Bluetooth: hci_event: Fix checking for invalid handle on error status
  ice: fix use-after-free when deinitializing mailbox snapshot
  ice: wait 5 s for EMP reset after firmware flash
  ice: Protect vf_state check by cfg_lock in ice_vc_process_vf_msg()
  ...
Merged by Linus Torvalds on 2022-04-28 12:34:50 -07:00
commit 249aca0d3d
70 changed files with 604 additions and 420 deletions

@@ -27,32 +27,25 @@ description:
   The realtek-mdio driver is an MDIO driver and it must be inserted inside
   an MDIO node.
 
+  The compatible string is only used to identify which (silicon) family the
+  switch belongs to. Roughly speaking, a family is any set of Realtek switches
+  whose chip identification register(s) have a common location and semantics.
+  The different models in a given family can be automatically disambiguated by
+  parsing the chip identification register(s) according to the given family,
+  avoiding the need for a unique compatible string for each model.
+
 properties:
   compatible:
     enum:
       - realtek,rtl8365mb
-      - realtek,rtl8366
       - realtek,rtl8366rb
-      - realtek,rtl8366s
-      - realtek,rtl8367
-      - realtek,rtl8367b
-      - realtek,rtl8367rb
-      - realtek,rtl8367s
-      - realtek,rtl8368s
-      - realtek,rtl8369
-      - realtek,rtl8370
     description: |
-      realtek,rtl8365mb: 4+1 ports
-      realtek,rtl8366: 5+1 ports
-      realtek,rtl8366rb: 5+1 ports
-      realtek,rtl8366s: 5+1 ports
-      realtek,rtl8367:
-      realtek,rtl8367b:
-      realtek,rtl8367rb: 5+2 ports
-      realtek,rtl8367s: 5+2 ports
-      realtek,rtl8368s: 8 ports
-      realtek,rtl8369: 8+1 ports
-      realtek,rtl8370: 8+2 ports
+      realtek,rtl8365mb:
+        Use with models RTL8363NB, RTL8363NB-VB, RTL8363SC, RTL8363SC-VB,
+        RTL8364NB, RTL8364NB-VB, RTL8365MB, RTL8366SC, RTL8367RB-VB, RTL8367S,
+        RTL8367SB, RTL8370MB, RTL8310SR
+      realtek,rtl8366rb:
+        Use with models RTL8366RB, RTL8366S
 
   mdc-gpios:
     description: GPIO line for the MDC clock line.
@@ -335,7 +328,7 @@ examples:
             #size-cells = <0>;
 
             switch@29 {
-                    compatible = "realtek,rtl8367s";
+                    compatible = "realtek,rtl8365mb";
                     reg = <29>;
                     reset-gpios = <&gpio2 20 GPIO_ACTIVE_LOW>;


@@ -3913,7 +3913,9 @@ BROADCOM BNXT_EN 50 GIGABIT ETHERNET DRIVER
 M: Michael Chan <michael.chan@broadcom.com>
 L: netdev@vger.kernel.org
 S: Supported
+F: drivers/firmware/broadcom/tee_bnxt_fw.c
 F: drivers/net/ethernet/broadcom/bnxt/
+F: include/linux/firmware/broadcom/tee_bnxt_fw.h
 
 BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER
 M: Arend van Spriel <aspriel@gmail.com>
@@ -13623,6 +13625,7 @@ F: net/core/drop_monitor.c
 
 NETWORKING DRIVERS
 M: "David S. Miller" <davem@davemloft.net>
+M: Eric Dumazet <edumazet@google.com>
 M: Jakub Kicinski <kuba@kernel.org>
 M: Paolo Abeni <pabeni@redhat.com>
 L: netdev@vger.kernel.org
@@ -13670,6 +13673,7 @@ F: tools/testing/selftests/drivers/net/dsa/
 
 NETWORKING [GENERAL]
 M: "David S. Miller" <davem@davemloft.net>
+M: Eric Dumazet <edumazet@google.com>
 M: Jakub Kicinski <kuba@kernel.org>
 M: Paolo Abeni <pabeni@redhat.com>
 L: netdev@vger.kernel.org


@@ -1681,9 +1681,6 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
 		break;
 	case PHY_INTERFACE_MODE_RMII:
 		miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
-
-		/* Configure the RMII clock as output: */
-		miicfg |= GSWIP_MII_CFG_RMII_CLK;
 		break;
 	case PHY_INTERFACE_MODE_RGMII:
 	case PHY_INTERFACE_MODE_RGMII_ID:


@@ -40,8 +40,9 @@ int mv88e6xxx_port_hidden_wait(struct mv88e6xxx_chip *chip)
 {
 	int bit = __bf_shf(MV88E6XXX_PORT_RESERVED_1A_BUSY);
 
-	return mv88e6xxx_wait_bit(chip, MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT,
-				  MV88E6XXX_PORT_RESERVED_1A, bit, 0);
+	return mv88e6xxx_port_wait_bit(chip,
+				       MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT,
+				       MV88E6XXX_PORT_RESERVED_1A, bit, 0);
 }
 
 int mv88e6xxx_port_hidden_read(struct mv88e6xxx_chip *chip, int block, int port,


@@ -267,7 +267,6 @@ static const struct of_device_id realtek_mdio_of_match[] = {
 #endif
 #if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
 	{ .compatible = "realtek,rtl8365mb", .data = &rtl8365mb_variant, },
-	{ .compatible = "realtek,rtl8367s", .data = &rtl8365mb_variant, },
 #endif
 	{ /* sentinel */ },
 };


@@ -551,10 +551,6 @@ static const struct of_device_id realtek_smi_of_match[] = {
 		.compatible = "realtek,rtl8365mb",
 		.data = &rtl8365mb_variant,
 	},
-	{
-		.compatible = "realtek,rtl8367s",
-		.data = &rtl8365mb_variant,
-	},
 #endif
 	{ /* sentinel */ },
 };


@@ -14153,10 +14153,6 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
 
 	/* Stop Tx */
 	bnx2x_tx_disable(bp);
-	/* Delete all NAPI objects */
-	bnx2x_del_all_napi(bp);
-	if (CNIC_LOADED(bp))
-		bnx2x_del_all_napi_cnic(bp);
 	netdev_reset_tc(bp->dev);
 
 	del_timer_sync(&bp->timer);
@@ -14261,6 +14257,11 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
 		bnx2x_drain_tx_queues(bp);
 		bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
 		bnx2x_netif_stop(bp, 1);
+		bnx2x_del_all_napi(bp);
+
+		if (CNIC_LOADED(bp))
+			bnx2x_del_all_napi_cnic(bp);
+
 		bnx2x_free_irq(bp);
 
 		/* Report UNLOAD_DONE to MCP */


@@ -2035,6 +2035,11 @@ static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
 	return skb;
 }
 
+static void bcmgenet_hide_tsb(struct sk_buff *skb)
+{
+	__skb_pull(skb, sizeof(struct status_64));
+}
+
 static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -2141,6 +2146,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	GENET_CB(skb)->last_cb = tx_cb_ptr;
+
+	bcmgenet_hide_tsb(skb);
 	skb_tx_timestamp(skb);
 
 	/* Decrement total BD count and advance our write pointer */


@@ -297,10 +297,6 @@ int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
 	if (tc < 0 || tc >= priv->num_tx_rings)
 		return -EINVAL;
 
-	/* Do not support TXSTART and TX CSUM offload simutaniously */
-	if (ndev->features & NETIF_F_CSUM_MASK)
-		return -EBUSY;
-
 	/* TSD and Qbv are mutually exclusive in hardware */
 	if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE)
 		return -EBUSY;


@@ -3731,7 +3731,7 @@ static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
 					 ARRAY_SIZE(out_val));
 	if (ret) {
 		dev_dbg(&fep->pdev->dev, "no stop mode property\n");
-		return ret;
+		goto out;
 	}
 
 	fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);


@@ -1065,19 +1065,23 @@ int hns_mac_init(struct dsaf_device *dsaf_dev)
 	device_for_each_child_node(dsaf_dev->dev, child) {
 		ret = fwnode_property_read_u32(child, "reg", &port_id);
 		if (ret) {
+			fwnode_handle_put(child);
 			dev_err(dsaf_dev->dev,
 				"get reg fail, ret=%d!\n", ret);
 			return ret;
 		}
 		if (port_id >= max_port_num) {
+			fwnode_handle_put(child);
 			dev_err(dsaf_dev->dev,
 				"reg(%u) out of range!\n", port_id);
 			return -EINVAL;
 		}
 		mac_cb = devm_kzalloc(dsaf_dev->dev, sizeof(*mac_cb),
 				      GFP_KERNEL);
-		if (!mac_cb)
+		if (!mac_cb) {
+			fwnode_handle_put(child);
 			return -ENOMEM;
+		}
 		mac_cb->fw_port = child;
 		mac_cb->mac_id = (u8)port_id;
 		dsaf_dev->mac_cb[port_id] = mac_cb;


@@ -75,7 +75,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
 		ret = hclge_comm_cmd_send(hw, &desc, 1);
 		if (ret) {
 			dev_err(&hw->cmq.csq.pdev->dev,
-				"failed to get tqp stat, ret = %d, tx = %u.\n",
+				"failed to get tqp stat, ret = %d, rx = %u.\n",
 				ret, i);
 			return ret;
 		}
@@ -89,7 +89,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
 		ret = hclge_comm_cmd_send(hw, &desc, 1);
 		if (ret) {
 			dev_err(&hw->cmq.csq.pdev->dev,
-				"failed to get tqp stat, ret = %d, rx = %u.\n",
+				"failed to get tqp stat, ret = %d, tx = %u.\n",
 				ret, i);
 			return ret;
 		}


@ -562,12 +562,12 @@ static void hns3_dbg_tx_spare_info(struct hns3_enet_ring *ring, char *buf,
for (i = 0; i < ring_num; i++) { for (i = 0; i < ring_num; i++) {
j = 0; j = 0;
sprintf(result[j++], "%8u", i); sprintf(result[j++], "%u", i);
sprintf(result[j++], "%9u", ring->tx_copybreak); sprintf(result[j++], "%u", ring->tx_copybreak);
sprintf(result[j++], "%3u", tx_spare->len); sprintf(result[j++], "%u", tx_spare->len);
sprintf(result[j++], "%3u", tx_spare->next_to_use); sprintf(result[j++], "%u", tx_spare->next_to_use);
sprintf(result[j++], "%3u", tx_spare->next_to_clean); sprintf(result[j++], "%u", tx_spare->next_to_clean);
sprintf(result[j++], "%3u", tx_spare->last_to_clean); sprintf(result[j++], "%u", tx_spare->last_to_clean);
sprintf(result[j++], "%pad", &tx_spare->dma); sprintf(result[j++], "%pad", &tx_spare->dma);
hns3_dbg_fill_content(content, sizeof(content), hns3_dbg_fill_content(content, sizeof(content),
tx_spare_info_items, tx_spare_info_items,
@ -598,35 +598,35 @@ static void hns3_dump_rx_queue_info(struct hns3_enet_ring *ring,
u32 base_add_l, base_add_h; u32 base_add_l, base_add_h;
u32 j = 0; u32 j = 0;
sprintf(result[j++], "%8u", index); sprintf(result[j++], "%u", index);
sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BD_NUM_REG)); HNS3_RING_RX_RING_BD_NUM_REG));
sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BD_LEN_REG)); HNS3_RING_RX_RING_BD_LEN_REG));
sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_TAIL_REG)); HNS3_RING_RX_RING_TAIL_REG));
sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_HEAD_REG)); HNS3_RING_RX_RING_HEAD_REG));
sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_FBDNUM_REG)); HNS3_RING_RX_RING_FBDNUM_REG));
sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_PKTNUM_RECORD_REG)); HNS3_RING_RX_RING_PKTNUM_RECORD_REG));
sprintf(result[j++], "%9u", ring->rx_copybreak); sprintf(result[j++], "%u", ring->rx_copybreak);
sprintf(result[j++], "%7s", readl_relaxed(ring->tqp->io_base + sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
HNS3_RING_EN_REG) ? "on" : "off"); HNS3_RING_EN_REG) ? "on" : "off");
if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev)) if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
sprintf(result[j++], "%10s", readl_relaxed(ring->tqp->io_base + sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_EN_REG) ? "on" : "off"); HNS3_RING_RX_EN_REG) ? "on" : "off");
else else
sprintf(result[j++], "%10s", "NA"); sprintf(result[j++], "%s", "NA");
base_add_h = readl_relaxed(ring->tqp->io_base + base_add_h = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BASEADDR_H_REG); HNS3_RING_RX_RING_BASEADDR_H_REG);
@ -700,36 +700,36 @@ static void hns3_dump_tx_queue_info(struct hns3_enet_ring *ring,
u32 base_add_l, base_add_h; u32 base_add_l, base_add_h;
u32 j = 0; u32 j = 0;
sprintf(result[j++], "%8u", index); sprintf(result[j++], "%u", index);
sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_BD_NUM_REG)); HNS3_RING_TX_RING_BD_NUM_REG));
sprintf(result[j++], "%2u", readl_relaxed(ring->tqp->io_base + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_TC_REG)); HNS3_RING_TX_RING_TC_REG));
sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_TAIL_REG)); HNS3_RING_TX_RING_TAIL_REG));
sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_HEAD_REG)); HNS3_RING_TX_RING_HEAD_REG));
sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_FBDNUM_REG)); HNS3_RING_TX_RING_FBDNUM_REG));
sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_OFFSET_REG)); HNS3_RING_TX_RING_OFFSET_REG));
sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_PKTNUM_RECORD_REG)); HNS3_RING_TX_RING_PKTNUM_RECORD_REG));
sprintf(result[j++], "%7s", readl_relaxed(ring->tqp->io_base + sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
HNS3_RING_EN_REG) ? "on" : "off"); HNS3_RING_EN_REG) ? "on" : "off");
if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev)) if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
sprintf(result[j++], "%10s", readl_relaxed(ring->tqp->io_base + sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_EN_REG) ? "on" : "off"); HNS3_RING_TX_EN_REG) ? "on" : "off");
else else
sprintf(result[j++], "%10s", "NA"); sprintf(result[j++], "%s", "NA");
base_add_h = readl_relaxed(ring->tqp->io_base + base_add_h = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_BASEADDR_H_REG); HNS3_RING_TX_RING_BASEADDR_H_REG);
@ -848,15 +848,15 @@ static void hns3_dump_rx_bd_info(struct hns3_nic_priv *priv,
{ {
unsigned int j = 0; unsigned int j = 0;
sprintf(result[j++], "%5d", idx); sprintf(result[j++], "%d", idx);
sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.l234_info)); sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.l234_info));
sprintf(result[j++], "%7u", le16_to_cpu(desc->rx.pkt_len)); sprintf(result[j++], "%u", le16_to_cpu(desc->rx.pkt_len));
sprintf(result[j++], "%4u", le16_to_cpu(desc->rx.size)); sprintf(result[j++], "%u", le16_to_cpu(desc->rx.size));
sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.rss_hash)); sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.rss_hash));
sprintf(result[j++], "%5u", le16_to_cpu(desc->rx.fd_id)); sprintf(result[j++], "%u", le16_to_cpu(desc->rx.fd_id));
sprintf(result[j++], "%8u", le16_to_cpu(desc->rx.vlan_tag)); sprintf(result[j++], "%u", le16_to_cpu(desc->rx.vlan_tag));
sprintf(result[j++], "%15u", le16_to_cpu(desc->rx.o_dm_vlan_id_fb)); sprintf(result[j++], "%u", le16_to_cpu(desc->rx.o_dm_vlan_id_fb));
sprintf(result[j++], "%11u", le16_to_cpu(desc->rx.ot_vlan_tag)); sprintf(result[j++], "%u", le16_to_cpu(desc->rx.ot_vlan_tag));
sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.bd_base_info)); sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.bd_base_info));
if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) { if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) {
u32 ol_info = le32_to_cpu(desc->rx.ol_info); u32 ol_info = le32_to_cpu(desc->rx.ol_info);
@ -930,19 +930,19 @@ static void hns3_dump_tx_bd_info(struct hns3_nic_priv *priv,
{ {
unsigned int j = 0; unsigned int j = 0;
sprintf(result[j++], "%6d", idx); sprintf(result[j++], "%d", idx);
sprintf(result[j++], "%#llx", le64_to_cpu(desc->addr)); sprintf(result[j++], "%#llx", le64_to_cpu(desc->addr));
sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.vlan_tag)); sprintf(result[j++], "%u", le16_to_cpu(desc->tx.vlan_tag));
sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.send_size)); sprintf(result[j++], "%u", le16_to_cpu(desc->tx.send_size));
sprintf(result[j++], "%#x", sprintf(result[j++], "%#x",
le32_to_cpu(desc->tx.type_cs_vlan_tso_len)); le32_to_cpu(desc->tx.type_cs_vlan_tso_len));
sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.outer_vlan_tag)); sprintf(result[j++], "%u", le16_to_cpu(desc->tx.outer_vlan_tag));
sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.tv)); sprintf(result[j++], "%u", le16_to_cpu(desc->tx.tv));
sprintf(result[j++], "%10u", sprintf(result[j++], "%u",
le32_to_cpu(desc->tx.ol_type_vlan_len_msec)); le32_to_cpu(desc->tx.ol_type_vlan_len_msec));
sprintf(result[j++], "%#x", le32_to_cpu(desc->tx.paylen_ol4cs)); sprintf(result[j++], "%#x", le32_to_cpu(desc->tx.paylen_ol4cs));
sprintf(result[j++], "%#x", le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri)); sprintf(result[j++], "%#x", le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri));
sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.mss_hw_csum)); sprintf(result[j++], "%u", le16_to_cpu(desc->tx.mss_hw_csum));
} }
static int hns3_dbg_tx_bd_info(struct hns3_dbg_data *d, char *buf, int len) static int hns3_dbg_tx_bd_info(struct hns3_dbg_data *d, char *buf, int len)


@@ -5203,6 +5203,13 @@ static void hns3_state_init(struct hnae3_handle *handle)
 		set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state);
 }
 
+static void hns3_state_uninit(struct hnae3_handle *handle)
+{
+	struct hns3_nic_priv *priv = handle->priv;
+
+	clear_bit(HNS3_NIC_STATE_INITED, &priv->state);
+}
+
 static int hns3_client_init(struct hnae3_handle *handle)
 {
 	struct pci_dev *pdev = handle->pdev;
@@ -5320,7 +5327,9 @@ static int hns3_client_init(struct hnae3_handle *handle)
 	return ret;
 
 out_reg_netdev_fail:
+	hns3_state_uninit(handle);
 	hns3_dbg_uninit(handle);
+	hns3_client_stop(handle);
 out_client_start:
 	hns3_free_rx_cpu_rmap(netdev);
 	hns3_nic_uninit_irq(priv);


@ -94,6 +94,13 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
enum hclge_comm_cmd_status status; enum hclge_comm_cmd_status status;
struct hclge_desc desc; struct hclge_desc desc;
if (msg_len > HCLGE_MBX_MAX_MSG_SIZE) {
dev_err(&hdev->pdev->dev,
"msg data length(=%u) exceeds maximum(=%u)\n",
msg_len, HCLGE_MBX_MAX_MSG_SIZE);
return -EMSGSIZE;
}
resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data; resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false); hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);
@ -176,7 +183,7 @@ static int hclge_get_ring_chain_from_mbx(
ring_num = req->msg.ring_num; ring_num = req->msg.ring_num;
if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM) if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM)
return -ENOMEM; return -EINVAL;
for (i = 0; i < ring_num; i++) { for (i = 0; i < ring_num; i++) {
if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) { if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) {
@ -587,9 +594,9 @@ static int hclge_set_vf_mtu(struct hclge_vport *vport,
return hclge_set_vport_mtu(vport, mtu); return hclge_set_vport_mtu(vport, mtu);
} }
static void hclge_get_queue_id_in_pf(struct hclge_vport *vport, static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req, struct hclge_mbx_vf_to_pf_cmd *mbx_req,
struct hclge_respond_to_vf_msg *resp_msg) struct hclge_respond_to_vf_msg *resp_msg)
{ {
struct hnae3_handle *handle = &vport->nic; struct hnae3_handle *handle = &vport->nic;
struct hclge_dev *hdev = vport->back; struct hclge_dev *hdev = vport->back;
@ -599,17 +606,18 @@ static void hclge_get_queue_id_in_pf(struct hclge_vport *vport,
if (queue_id >= handle->kinfo.num_tqps) { if (queue_id >= handle->kinfo.num_tqps) {
dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n", dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n",
queue_id, mbx_req->mbx_src_vfid); queue_id, mbx_req->mbx_src_vfid);
return; return -EINVAL;
} }
qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id); qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf)); memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf));
resp_msg->len = sizeof(qid_in_pf); resp_msg->len = sizeof(qid_in_pf);
return 0;
} }
static void hclge_get_rss_key(struct hclge_vport *vport, static int hclge_get_rss_key(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req, struct hclge_mbx_vf_to_pf_cmd *mbx_req,
struct hclge_respond_to_vf_msg *resp_msg) struct hclge_respond_to_vf_msg *resp_msg)
{ {
#define HCLGE_RSS_MBX_RESP_LEN 8 #define HCLGE_RSS_MBX_RESP_LEN 8
struct hclge_dev *hdev = vport->back; struct hclge_dev *hdev = vport->back;
@ -627,13 +635,14 @@ static void hclge_get_rss_key(struct hclge_vport *vport,
dev_warn(&hdev->pdev->dev, dev_warn(&hdev->pdev->dev,
"failed to get the rss hash key, the index(%u) invalid !\n", "failed to get the rss hash key, the index(%u) invalid !\n",
index); index);
return; return -EINVAL;
} }
memcpy(resp_msg->data, memcpy(resp_msg->data,
&rss_cfg->rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN], &rss_cfg->rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
HCLGE_RSS_MBX_RESP_LEN); HCLGE_RSS_MBX_RESP_LEN);
resp_msg->len = HCLGE_RSS_MBX_RESP_LEN; resp_msg->len = HCLGE_RSS_MBX_RESP_LEN;
return 0;
} }
static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code) static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code)
@ -809,10 +818,10 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
"VF fail(%d) to set mtu\n", ret); "VF fail(%d) to set mtu\n", ret);
break; break;
case HCLGE_MBX_GET_QID_IN_PF: case HCLGE_MBX_GET_QID_IN_PF:
hclge_get_queue_id_in_pf(vport, req, &resp_msg); ret = hclge_get_queue_id_in_pf(vport, req, &resp_msg);
break; break;
case HCLGE_MBX_GET_RSS_KEY: case HCLGE_MBX_GET_RSS_KEY:
hclge_get_rss_key(vport, req, &resp_msg); ret = hclge_get_rss_key(vport, req, &resp_msg);
break; break;
case HCLGE_MBX_GET_LINK_MODE: case HCLGE_MBX_GET_LINK_MODE:
hclge_get_link_mode(vport, req); hclge_get_link_mode(vport, req);


@ -3210,13 +3210,8 @@ static void ibmvnic_get_ringparam(struct net_device *netdev,
{ {
struct ibmvnic_adapter *adapter = netdev_priv(netdev); struct ibmvnic_adapter *adapter = netdev_priv(netdev);
if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) { ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq; ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
} else {
ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
}
ring->rx_mini_max_pending = 0; ring->rx_mini_max_pending = 0;
ring->rx_jumbo_max_pending = 0; ring->rx_jumbo_max_pending = 0;
ring->rx_pending = adapter->req_rx_add_entries_per_subcrq; ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
@ -3231,23 +3226,21 @@ static int ibmvnic_set_ringparam(struct net_device *netdev,
struct netlink_ext_ack *extack) struct netlink_ext_ack *extack)
{ {
struct ibmvnic_adapter *adapter = netdev_priv(netdev); struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int ret;
ret = 0; if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
netdev_err(netdev, "Invalid request.\n");
netdev_err(netdev, "Max tx buffers = %llu\n",
adapter->max_rx_add_entries_per_subcrq);
netdev_err(netdev, "Max rx buffers = %llu\n",
adapter->max_tx_entries_per_subcrq);
return -EINVAL;
}
adapter->desired.rx_entries = ring->rx_pending; adapter->desired.rx_entries = ring->rx_pending;
adapter->desired.tx_entries = ring->tx_pending; adapter->desired.tx_entries = ring->tx_pending;
ret = wait_for_reset(adapter); return wait_for_reset(adapter);
if (!ret &&
(adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
adapter->req_tx_entries_per_subcrq != ring->tx_pending))
netdev_info(netdev,
"Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
ring->rx_pending, ring->tx_pending,
adapter->req_rx_add_entries_per_subcrq,
adapter->req_tx_entries_per_subcrq);
return ret;
} }
static void ibmvnic_get_channels(struct net_device *netdev, static void ibmvnic_get_channels(struct net_device *netdev,
@ -3255,14 +3248,8 @@ static void ibmvnic_get_channels(struct net_device *netdev,
{ {
struct ibmvnic_adapter *adapter = netdev_priv(netdev); struct ibmvnic_adapter *adapter = netdev_priv(netdev);
if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) { channels->max_rx = adapter->max_rx_queues;
channels->max_rx = adapter->max_rx_queues; channels->max_tx = adapter->max_tx_queues;
channels->max_tx = adapter->max_tx_queues;
} else {
channels->max_rx = IBMVNIC_MAX_QUEUES;
channels->max_tx = IBMVNIC_MAX_QUEUES;
}
channels->max_other = 0; channels->max_other = 0;
channels->max_combined = 0; channels->max_combined = 0;
channels->rx_count = adapter->req_rx_queues; channels->rx_count = adapter->req_rx_queues;
@ -3275,22 +3262,11 @@ static int ibmvnic_set_channels(struct net_device *netdev,
struct ethtool_channels *channels) struct ethtool_channels *channels)
{ {
struct ibmvnic_adapter *adapter = netdev_priv(netdev); struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int ret;
ret = 0;
adapter->desired.rx_queues = channels->rx_count; adapter->desired.rx_queues = channels->rx_count;
adapter->desired.tx_queues = channels->tx_count; adapter->desired.tx_queues = channels->tx_count;
ret = wait_for_reset(adapter); return wait_for_reset(adapter);
if (!ret &&
(adapter->req_rx_queues != channels->rx_count ||
adapter->req_tx_queues != channels->tx_count))
netdev_info(netdev,
"Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
channels->rx_count, channels->tx_count,
adapter->req_rx_queues, adapter->req_tx_queues);
return ret;
} }
static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@ -3298,43 +3274,32 @@ static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
struct ibmvnic_adapter *adapter = netdev_priv(dev); struct ibmvnic_adapter *adapter = netdev_priv(dev);
int i; int i;
switch (stringset) { if (stringset != ETH_SS_STATS)
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
i++, data += ETH_GSTRING_LEN)
memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
for (i = 0; i < adapter->req_tx_queues; i++) {
snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
data += ETH_GSTRING_LEN;
snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
data += ETH_GSTRING_LEN;
snprintf(data, ETH_GSTRING_LEN,
"tx%d_dropped_packets", i);
data += ETH_GSTRING_LEN;
}
for (i = 0; i < adapter->req_rx_queues; i++) {
snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
data += ETH_GSTRING_LEN;
snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
data += ETH_GSTRING_LEN;
snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
data += ETH_GSTRING_LEN;
}
break;
case ETH_SS_PRIV_FLAGS:
for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
strcpy(data + i * ETH_GSTRING_LEN,
ibmvnic_priv_flags[i]);
break;
default:
return; return;
for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
for (i = 0; i < adapter->req_tx_queues; i++) {
snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
data += ETH_GSTRING_LEN;
snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
data += ETH_GSTRING_LEN;
snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
data += ETH_GSTRING_LEN;
}
for (i = 0; i < adapter->req_rx_queues; i++) {
snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
data += ETH_GSTRING_LEN;
snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
data += ETH_GSTRING_LEN;
snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
data += ETH_GSTRING_LEN;
} }
} }
@ -3347,8 +3312,6 @@ static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
return ARRAY_SIZE(ibmvnic_stats) + return ARRAY_SIZE(ibmvnic_stats) +
adapter->req_tx_queues * NUM_TX_STATS + adapter->req_tx_queues * NUM_TX_STATS +
adapter->req_rx_queues * NUM_RX_STATS; adapter->req_rx_queues * NUM_RX_STATS;
case ETH_SS_PRIV_FLAGS:
return ARRAY_SIZE(ibmvnic_priv_flags);
default: default:
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
@ -3401,26 +3364,6 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
} }
} }
static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
return adapter->priv_flags;
}
static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
if (which_maxes)
adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
else
adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
return 0;
}
static const struct ethtool_ops ibmvnic_ethtool_ops = { static const struct ethtool_ops ibmvnic_ethtool_ops = {
.get_drvinfo = ibmvnic_get_drvinfo, .get_drvinfo = ibmvnic_get_drvinfo,
.get_msglevel = ibmvnic_get_msglevel, .get_msglevel = ibmvnic_get_msglevel,
@ -3434,8 +3377,6 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = {
.get_sset_count = ibmvnic_get_sset_count, .get_sset_count = ibmvnic_get_sset_count,
.get_ethtool_stats = ibmvnic_get_ethtool_stats, .get_ethtool_stats = ibmvnic_get_ethtool_stats,
.get_link_ksettings = ibmvnic_get_link_ksettings, .get_link_ksettings = ibmvnic_get_link_ksettings,
.get_priv_flags = ibmvnic_get_priv_flags,
.set_priv_flags = ibmvnic_set_priv_flags,
}; };
/* Routines for managing CRQs/sCRQs */ /* Routines for managing CRQs/sCRQs */


@@ -41,11 +41,6 @@
 
 #define IBMVNIC_RESET_DELAY 100
 
-static const char ibmvnic_priv_flags[][ETH_GSTRING_LEN] = {
-#define IBMVNIC_USE_SERVER_MAXES	0x1
-	"use-server-maxes"
-};
-
 struct ibmvnic_login_buffer {
 	__be32 len;
 	__be32 version;
@@ -883,7 +878,6 @@ struct ibmvnic_adapter {
 	struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
 	dma_addr_t ip_offload_ctrl_tok;
 	u32 msg_enable;
-	u32 priv_flags;
 
 	/* Vital Product Data (VPD) */
 	struct ibmvnic_vpd *vpd;


@@ -6929,12 +6929,15 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
 
 	dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
 
+#define ICE_EMP_RESET_SLEEP_MS 5000
 	if (reset_type == ICE_RESET_EMPR) {
 		/* If an EMP reset has occurred, any previously pending flash
 		 * update will have completed. We no longer know whether or
 		 * not the NVM update EMP reset is restricted.
 		 */
 		pf->fw_emp_reset_disabled = false;
+
+		msleep(ICE_EMP_RESET_SLEEP_MS);
 	}
 
 	err = ice_init_all_ctrlq(hw);


@@ -1046,8 +1046,8 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
 
 	if (!num_vfs) {
 		if (!pci_vfs_assigned(pdev)) {
-			ice_mbx_deinit_snapshot(&pf->hw);
 			ice_free_vfs(pf);
+			ice_mbx_deinit_snapshot(&pf->hw);
 			if (pf->lag)
 				ice_enable_lag(pf->lag);
 			return 0;


@@ -3625,6 +3625,8 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
 		return;
 	}
 
+	mutex_lock(&vf->cfg_lock);
+
 	/* Check if VF is disabled. */
 	if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
 		err = -EPERM;
@@ -3642,32 +3644,20 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
 		err = -EINVAL;
 	}
 
-	if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
-		ice_vc_send_msg_to_vf(vf, v_opcode,
-				      VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
-				      0);
-		ice_put_vf(vf);
-		return;
-	}
-
 error_handler:
 	if (err) {
 		ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
 				      NULL, 0);
 		dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
 			vf_id, v_opcode, msglen, err);
-		ice_put_vf(vf);
-		return;
+		goto finish;
 	}
 
-	/* VF is being configured in another context that triggers a VFR, so no
-	 * need to process this message
-	 */
-	if (!mutex_trylock(&vf->cfg_lock)) {
-		dev_info(dev, "VF %u is being configured in another context that will trigger a VFR, so there is no need to handle this message\n",
-			 vf->vf_id);
-		ice_put_vf(vf);
-		return;
+	if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
+		ice_vc_send_msg_to_vf(vf, v_opcode,
+				      VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
+				      0);
+		goto finish;
 	}
 
 	switch (v_opcode) {
@@ -3780,6 +3770,7 @@ error_handler:
 			 vf_id, v_opcode, err);
 	}
 
+finish:
 	mutex_unlock(&vf->cfg_lock);
 	ice_put_vf(vf);
 }


@@ -903,7 +903,8 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
 	/* Tx IPsec offload doesn't seem to work on this
 	 * device, so block these requests for now.
 	 */
-	if (!(sam->flags & XFRM_OFFLOAD_INBOUND)) {
+	sam->flags = sam->flags & ~XFRM_OFFLOAD_IPV6;
+	if (sam->flags != XFRM_OFFLOAD_INBOUND) {
 		err = -EOPNOTSUPP;
 		goto err_out;
 	}
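The ixgbe hunk above tightens a capability check: the old test only required the XFRM_OFFLOAD_INBOUND bit to be set, so a VF could request an SA carrying additional offload flags the PF driver cannot honour, while the new code first clears the harmless XFRM_OFFLOAD_IPV6 bit and then insists that nothing but INBOUND remains. A self-contained userspace sketch of the difference (the flag values below are invented for the demo, not the kernel's):

    #include <stdio.h>

    /* Illustrative flag values only; the real XFRM_OFFLOAD_* constants live
     * in the kernel headers. */
    #define OFFLOAD_IPV6    0x1
    #define OFFLOAD_INBOUND 0x2
    #define OFFLOAD_FUTURE  0x4   /* some capability the driver does not handle */

    static int old_check(unsigned int flags)
    {
            /* Accepts any SA as long as the INBOUND bit is present. */
            return (flags & OFFLOAD_INBOUND) != 0;
    }

    static int new_check(unsigned int flags)
    {
            /* Ignore the address-family bit, then require INBOUND and nothing else. */
            flags &= ~OFFLOAD_IPV6;
            return flags == OFFLOAD_INBOUND;
    }

    int main(void)
    {
            unsigned int sa = OFFLOAD_INBOUND | OFFLOAD_FUTURE;

            printf("old check: %s\n", old_check(sa) ? "accepted" : "rejected");
            printf("new check: %s\n", new_check(sa) ? "accepted" : "rejected");
            return 0;
    }

With the stricter comparison, an SA that carries an unknown capability bit is rejected instead of being silently misconfigured.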


@@ -346,7 +346,7 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
 			lan966x_mac_process_raw_entry(&raw_entries[column],
 						      mac, &vid, &dest_idx);
-			if (WARN_ON(dest_idx > lan966x->num_phys_ports))
+			if (WARN_ON(dest_idx >= lan966x->num_phys_ports))
 				continue;
 
 			/* If the entry in SW is found, then there is nothing
@@ -393,7 +393,7 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
 			lan966x_mac_process_raw_entry(&raw_entries[column],
 						      mac, &vid, &dest_idx);
-			if (WARN_ON(dest_idx > lan966x->num_phys_ports))
+			if (WARN_ON(dest_idx >= lan966x->num_phys_ports))
 				continue;
 
 			mac_entry = lan966x_mac_alloc_entry(mac, vid, dest_idx);
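The two one-character lan966x changes above fix a classic off-by-one: with N physical ports the valid indices are 0..N-1, so `dest_idx > N` still lets `dest_idx == N` through and the following port lookup reads one element past the end of the array. A minimal standalone illustration:

    #include <stdio.h>

    #define NUM_PORTS 4

    int main(void)
    {
            unsigned int dest_idx = NUM_PORTS;   /* bogus index, one past the end */

            if (dest_idx > NUM_PORTS)            /* old check: false for idx == 4 */
                    printf("old check skips the entry\n");
            else
                    printf("old check accepts index %u -> ports[%u] is out of bounds\n",
                           dest_idx, dest_idx);

            if (dest_idx >= NUM_PORTS)           /* fixed check: true for idx == 4 */
                    printf("new check skips the entry, the %d-element array is never touched\n",
                           NUM_PORTS);
            return 0;
    }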


@@ -551,7 +551,7 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
 	struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1];
 	struct ocelot_port *ocelot_port = ocelot->ports[port];
 	struct ocelot_vcap_filter *filter;
-	int err;
+	int err = 0;
 	u32 val;
 
 	list_for_each_entry(filter, &block->rules, list) {
@@ -570,7 +570,7 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
 	if (vlan_aware)
 		err = ocelot_del_vlan_unaware_pvid(ocelot, port,
 						   ocelot_port->bridge);
-	else
+	else if (ocelot_port->bridge)
 		err = ocelot_add_vlan_unaware_pvid(ocelot, port,
 						   ocelot_port->bridge);
 	if (err)
@@ -629,6 +629,13 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
 {
 	int err;
 
+	/* Ignore VID 0 added to our RX filter by the 8021q module, since
+	 * that collides with OCELOT_STANDALONE_PVID and changes it from
+	 * egress-untagged to egress-tagged.
+	 */
+	if (!vid)
+		return 0;
+
 	err = ocelot_vlan_member_add(ocelot, port, vid, untagged);
 	if (err)
 		return err;
@@ -651,6 +658,9 @@ int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
 	bool del_pvid = false;
 	int err;
 
+	if (!vid)
+		return 0;
+
 	if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid)
 		del_pvid = true;


@@ -65,8 +65,9 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
 	struct phy_device *phy_dev = ndev->phydev;
 	u32 val;
 
-	writew(SGMII_ADAPTER_DISABLE,
-	       sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
+	if (sgmii_adapter_base)
+		writew(SGMII_ADAPTER_DISABLE,
+		       sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
 
 	if (splitter_base) {
 		val = readl(splitter_base + EMAC_SPLITTER_CTRL_REG);
@@ -88,10 +89,11 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
 		writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG);
 	}
 
-	writew(SGMII_ADAPTER_ENABLE,
-	       sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
-	if (phy_dev)
+	if (phy_dev && sgmii_adapter_base) {
+		writew(SGMII_ADAPTER_ENABLE,
+		       sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
 		tse_pcs_fix_mac_speed(&dwmac->pcs, phy_dev, speed);
+	}
 }
 
 static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *dev)


@@ -880,7 +880,7 @@ static int mv3310_read_status_copper(struct phy_device *phydev)
 
 	cssr1 = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_CSSR1);
 	if (cssr1 < 0)
-		return val;
+		return cssr1;
 
 	/* If the link settings are not resolved, mark the link down */
 	if (!(cssr1 & MV_PCS_CSSR1_RESOLVED)) {


@@ -1005,6 +1005,24 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 			 * xdp.data_meta were adjusted
 			 */
 			len = xdp.data_end - xdp.data + vi->hdr_len + metasize;
+
+			/* recalculate headroom if xdp.data or xdp_data_meta
+			 * were adjusted, note that offset should always point
+			 * to the start of the reserved bytes for virtio_net
+			 * header which are followed by xdp.data, that means
+			 * that offset is equal to the headroom (when buf is
+			 * starting at the beginning of the page, otherwise
+			 * there is a base offset inside the page) but it's used
+			 * with a different starting point (buf start) than
+			 * xdp.data (buf start + vnet hdr size). If xdp.data or
+			 * data_meta were adjusted by the xdp prog then the
+			 * headroom size has changed and so has the offset, we
+			 * can use data_hard_start, which points at buf start +
+			 * vnet hdr size, to calculate the new headroom and use
+			 * it later to compute buf start in page_to_skb()
+			 */
+			headroom = xdp.data - xdp.data_hard_start - metasize;
+
 			/* We can only create skb based on xdp_page. */
 			if (unlikely(xdp_page != page)) {
 				rcu_read_unlock();
@@ -1012,7 +1030,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 				head_skb = page_to_skb(vi, rq, xdp_page, offset,
 						       len, PAGE_SIZE, false,
 						       metasize,
-						       VIRTIO_XDP_HEADROOM);
+						       headroom);
 
 				return head_skb;
 			}
 			break;
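The long comment added above reduces to simple pointer arithmetic: once an XDP program has moved xdp.data or xdp.data_meta, the saved offset no longer describes the headroom, but data_hard_start still marks the start of the usable buffer, so the headroom can be recomputed as `xdp.data - xdp.data_hard_start - metasize`. A toy userspace model of that calculation (buffer layout and sizes are invented for the demo):

    #include <stdio.h>

    int main(void)
    {
            char buf[256];

            /* Invented layout: 64 bytes of reserved headroom, then 8 bytes of
             * metadata pushed by an XDP program, then the packet data. */
            char *data_hard_start = buf;      /* start of the usable buffer   */
            char *data = buf + 64;            /* packet start after XDP ran   */
            long metasize = 8;                /* bytes of xdp.data_meta       */

            long headroom = data - data_hard_start - metasize;

            printf("recomputed headroom: %ld bytes\n", headroom);  /* prints 56 */
            return 0;
    }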


@@ -349,7 +349,7 @@ static int __init cosa_init(void)
 		}
 	} else {
 		cosa_major = register_chrdev(0, "cosa", &cosa_fops);
-		if (!cosa_major) {
+		if (cosa_major < 0) {
 			pr_warn("unable to register chardev\n");
 			err = -EIO;
 			goto out;


@@ -19,6 +19,7 @@
 #include <linux/if_arp.h>
 #include <linux/icmp.h>
 #include <linux/suspend.h>
+#include <net/dst_metadata.h>
 #include <net/icmp.h>
 #include <net/rtnetlink.h>
 #include <net/ip_tunnels.h>
@@ -167,7 +168,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto err_peer;
 	}
 
-	mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
+	mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
 
 	__skb_queue_head_init(&packets);
 	if (!skb_is_gso(skb)) {


@@ -199,10 +199,10 @@ struct net_device_stats {
  * Try to fit them in a single cache line, for dev_get_stats() sake.
  */
 struct net_device_core_stats {
-	local_t		rx_dropped;
-	local_t		tx_dropped;
-	local_t		rx_nohandler;
-} __aligned(4 * sizeof(local_t));
+	unsigned long	rx_dropped;
+	unsigned long	tx_dropped;
+	unsigned long	rx_nohandler;
+} __aligned(4 * sizeof(unsigned long));
 
 #include <linux/cache.h>
 #include <linux/skbuff.h>
@@ -3843,15 +3843,15 @@ static __always_inline bool __is_skb_forwardable(const struct net_device *dev,
 	return false;
 }
 
-struct net_device_core_stats *netdev_core_stats_alloc(struct net_device *dev);
+struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev);
 
-static inline struct net_device_core_stats *dev_core_stats(struct net_device *dev)
+static inline struct net_device_core_stats __percpu *dev_core_stats(struct net_device *dev)
 {
 	/* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
 	struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats);
 
 	if (likely(p))
-		return this_cpu_ptr(p);
+		return p;
 
 	return netdev_core_stats_alloc(dev);
 }
@@ -3859,14 +3859,11 @@ static inline struct net_device_core_stats *dev_core_stats(struct net_device *dev)
 #define DEV_CORE_STATS_INC(FIELD)					\
 static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev) \
 {									\
-	struct net_device_core_stats *p;				\
+	struct net_device_core_stats __percpu *p;			\
 									\
-	preempt_disable();						\
 	p = dev_core_stats(dev);					\
-									\
 	if (p)								\
-		local_inc(&p->FIELD);					\
-	preempt_enable();						\
+		this_cpu_inc(p->FIELD);					\
 }
 DEV_CORE_STATS_INC(rx_dropped)
 DEV_CORE_STATS_INC(tx_dropped)
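The netdevice.h hunk replaces local_t fields plus an explicit preempt_disable()/preempt_enable() pair with plain unsigned long per-CPU fields bumped through this_cpu_inc(), which folds the pointer lookup and the increment into one preemption-safe operation and therefore also behaves on PREEMPT_RT. The underlying idea, each CPU owning its own counter while readers sum all of them, can be sketched in userspace with one counter slot per thread (an analogy only, not the kernel's per-CPU machinery):

    /* build: cc -O2 -pthread percpu_demo.c */
    #include <pthread.h>
    #include <stdio.h>

    #define NTHREADS 4
    #define PER_THREAD_INCS 1000000

    /* One slot per "CPU"; each writer only ever touches its own slot, so the
     * hot path needs no atomics or locking. */
    static unsigned long rx_dropped[NTHREADS];

    static void *worker(void *arg)
    {
            unsigned long id = (unsigned long)arg;

            for (int i = 0; i < PER_THREAD_INCS; i++)
                    rx_dropped[id]++;          /* the this_cpu_inc() analogue */
            return NULL;
    }

    int main(void)
    {
            pthread_t tid[NTHREADS];
            unsigned long total = 0;

            for (unsigned long i = 0; i < NTHREADS; i++)
                    pthread_create(&tid[i], NULL, worker, (void *)i);
            for (int i = 0; i < NTHREADS; i++)
                    pthread_join(tid[i], NULL);

            /* Readers (dev_get_stats() in the kernel) sum the per-CPU slots. */
            for (int i = 0; i < NTHREADS; i++)
                    total += rx_dropped[i];

            printf("rx_dropped total: %lu\n", total);
            return 0;
    }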


@@ -578,6 +578,7 @@ enum {
 #define HCI_ERROR_CONNECTION_TIMEOUT	0x08
 #define HCI_ERROR_REJ_LIMITED_RESOURCES	0x0d
 #define HCI_ERROR_REJ_BAD_ADDR		0x0f
+#define HCI_ERROR_INVALID_PARAMETERS	0x12
 #define HCI_ERROR_REMOTE_USER_TERM	0x13
 #define HCI_ERROR_REMOTE_LOW_RESOURCES	0x14
 #define HCI_ERROR_REMOTE_POWER_OFF	0x15


@@ -1156,7 +1156,7 @@ int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
 
 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);
 
-void hci_le_conn_failed(struct hci_conn *conn, u8 status);
+void hci_conn_failed(struct hci_conn *conn, u8 status);
 
 /*
  * hci_conn_get() and hci_conn_put() are used to control the life-time of an


@@ -58,7 +58,7 @@ struct ip6_tnl {
 
 	/* These fields used only by GRE */
 	__u32 i_seqno;	/* The last seen seqno */
-	__u32 o_seqno;	/* The last output seqno */
+	atomic_t o_seqno;	/* The last output seqno */
 	int hlen;       /* tun_hlen + encap_hlen */
 	int tun_hlen;	/* Precalculated header length */
 	int encap_hlen; /* Encap header length (FOU,GUE) */


@@ -116,7 +116,7 @@ struct ip_tunnel {
 
 	/* These four fields used only by GRE */
 	u32		i_seqno;	/* The last seen seqno */
-	u32		o_seqno;	/* The last output seqno */
+	atomic_t	o_seqno;	/* The last output seqno */
 	int		tun_hlen;	/* Precalculated header length */
 
 	/* These four fields used only by ERSPAN */
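Both tunnel structs above switch o_seqno from a plain 32-bit counter to atomic_t because in collect_md mode several CPUs can transmit on the same tunnel device, and a non-atomic read-modify-write lets two senders emit duplicate or skipped sequence numbers. The race is easy to reproduce in a userspace analogue; C11 atomics stand in here for the kernel's atomic counter API:

    /* build: cc -O2 -pthread seqno_race.c */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define NTHREADS 4
    #define INCS_PER_THREAD 1000000

    static unsigned int plain_seqno;     /* the old, racy counter (data race, demo only) */
    static atomic_uint atomic_seqno;     /* the fixed counter */

    static void *sender(void *arg)
    {
            (void)arg;
            for (int i = 0; i < INCS_PER_THREAD; i++) {
                    plain_seqno++;                           /* lost updates possible */
                    atomic_fetch_add_explicit(&atomic_seqno, 1,
                                              memory_order_relaxed);
            }
            return NULL;
    }

    int main(void)
    {
            pthread_t tid[NTHREADS];

            for (int i = 0; i < NTHREADS; i++)
                    pthread_create(&tid[i], NULL, sender, NULL);
            for (int i = 0; i < NTHREADS; i++)
                    pthread_join(tid[i], NULL);

            printf("expected: %d\n", NTHREADS * INCS_PER_THREAD);
            printf("plain:    %u  (usually lower: duplicated/lost seqnos)\n", plain_seqno);
            printf("atomic:   %u\n", atomic_load(&atomic_seqno));
            return 0;
    }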


@@ -480,6 +480,7 @@ int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
 		      u32 cookie);
 struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
 struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
+					    const struct tcp_request_sock_ops *af_ops,
 					    struct sock *sk, struct sk_buff *skb);
 #ifdef CONFIG_SYN_COOKIES
 
@@ -620,6 +621,7 @@ void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
 void tcp_reset(struct sock *sk, struct sk_buff *skb);
 void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
 void tcp_fin(struct sock *sk);
+void tcp_check_space(struct sock *sk);
 
 /* tcp_timer.c */
 void tcp_init_xmit_timers(struct sock *);
@@ -1042,6 +1044,7 @@ struct rate_sample {
 	int  losses;		/* number of packets marked lost upon ACK */
 	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
 	u32  prior_in_flight;	/* in flight before this ACK */
+	u32  last_end_seq;	/* end_seq of most recently ACKed packet */
 	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
 	bool is_retrans;	/* is sample from retransmission? */
 	bool is_ack_delayed;	/* is this (likely) a delayed ACK? */
@@ -1164,6 +1167,11 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
 		  bool is_sack_reneg, struct rate_sample *rs);
 void tcp_rate_check_app_limited(struct sock *sk);
 
+static inline bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
+{
+	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
+}
+
 /* These functions determine how the current flow behaves in respect of SACK
  * handling. SACK is negotiated with the peer, and therefore it can vary
  * between different flows.
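The tcp_skb_sent_after() helper added above answers "which of two skbs was sent more recently?": it compares the send timestamps and, when they are equal, falls back to the sequence numbers using TCP's wrap-safe ordering, so rate sampling keys off the genuinely newest transmission. A standalone sketch of the same rule (after() is re-implemented locally with serial-number arithmetic for the demo):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Wrap-safe "seq1 is after seq2" test, the same idea as the kernel's after(). */
    static bool seq_after(uint32_t seq1, uint32_t seq2)
    {
            return (int32_t)(seq1 - seq2) > 0;
    }

    /* Mirror of the helper added to include/net/tcp.h in the hunk above. */
    static bool skb_sent_after(uint64_t t1, uint64_t t2, uint32_t seq1, uint32_t seq2)
    {
            return t1 > t2 || (t1 == t2 && seq_after(seq1, seq2));
    }

    int main(void)
    {
            /* Two skbs stamped in the same microsecond: the sequence number breaks
             * the tie, even across a 32-bit sequence wrap. */
            printf("%d\n", skb_sent_after(100, 100, 0x00000010, 0xfffffff0)); /* 1 */
            printf("%d\n", skb_sent_after(100, 101, 0x00000010, 0xfffffff0)); /* 0 */
            return 0;
    }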


@@ -97,6 +97,7 @@ int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
 		  u16 queue_id, u16 flags);
 int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
 			 struct net_device *dev, u16 queue_id);
+int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
 void xp_destroy(struct xsk_buff_pool *pool);
 void xp_get_pool(struct xsk_buff_pool *pool);
 bool xp_put_pool(struct xsk_buff_pool *pool);


@@ -2126,7 +2126,7 @@ static void kretprobe_rethook_handler(struct rethook_node *rh, void *data,
 	struct kprobe_ctlblk *kcb;
 
 	/* The data must NOT be null. This means rethook data structure is broken. */
-	if (WARN_ON_ONCE(!data))
+	if (WARN_ON_ONCE(!data) || !rp->handler)
 		return;
 
 	__this_cpu_write(current_kprobe, &rp->kp);


@@ -670,7 +670,7 @@ static void le_conn_timeout(struct work_struct *work)
 		/* Disable LE Advertising */
 		le_disable_advertising(hdev);
 		hci_dev_lock(hdev);
-		hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
+		hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
 		hci_dev_unlock(hdev);
 		return;
 	}
@@ -873,7 +873,7 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
 EXPORT_SYMBOL(hci_get_route);
 
 /* This function requires the caller holds hdev->lock */
-void hci_le_conn_failed(struct hci_conn *conn, u8 status)
+static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
 {
 	struct hci_dev *hdev = conn->hdev;
 	struct hci_conn_params *params;
@@ -886,8 +886,6 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
 		params->conn = NULL;
 	}
 
-	conn->state = BT_CLOSED;
-
 	/* If the status indicates successful cancellation of
 	 * the attempt (i.e. Unknown Connection Id) there's no point of
 	 * notifying failure since we'll go back to keep trying to
@@ -899,10 +897,6 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
 	mgmt_connect_failed(hdev, &conn->dst, conn->type,
 			    conn->dst_type, status);
 
-	hci_connect_cfm(conn, status);
-
-	hci_conn_del(conn);
-
 	/* Since we may have temporarily stopped the background scanning in
 	 * favor of connection establishment, we should restart it.
 	 */
@@ -914,6 +908,28 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
 		hci_enable_advertising(hdev);
 }
 
+/* This function requires the caller holds hdev->lock */
+void hci_conn_failed(struct hci_conn *conn, u8 status)
+{
+	struct hci_dev *hdev = conn->hdev;
+
+	bt_dev_dbg(hdev, "status 0x%2.2x", status);
+
+	switch (conn->type) {
+	case LE_LINK:
+		hci_le_conn_failed(conn, status);
+		break;
+	case ACL_LINK:
+		mgmt_connect_failed(hdev, &conn->dst, conn->type,
+				    conn->dst_type, status);
+		break;
+	}
+
+	conn->state = BT_CLOSED;
+	hci_connect_cfm(conn, status);
+	hci_conn_del(conn);
+}
+
 static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
 {
 	struct hci_conn *conn = data;


@@ -2834,7 +2834,7 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
 
 	/* All connection failure handling is taken care of by the
-	 * hci_le_conn_failed function which is triggered by the HCI
+	 * hci_conn_failed function which is triggered by the HCI
 	 * request completion callbacks used for connecting.
 	 */
 	if (status)
@@ -2859,7 +2859,7 @@ static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
 
 	/* All connection failure handling is taken care of by the
-	 * hci_le_conn_failed function which is triggered by the HCI
+	 * hci_conn_failed function which is triggered by the HCI
 	 * request completion callbacks used for connecting.
 	 */
 	if (status)
@@ -3067,18 +3067,20 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
 {
 	struct hci_ev_conn_complete *ev = data;
 	struct hci_conn *conn;
+	u8 status = ev->status;
 
-	if (__le16_to_cpu(ev->handle) > HCI_CONN_HANDLE_MAX) {
-		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for invalid handle");
-		return;
-	}
-
-	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", status);
 
 	hci_dev_lock(hdev);
 
 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
 	if (!conn) {
+		/* In case of error status and there is no connection pending
+		 * just unlock as there is nothing to cleanup.
+		 */
+		if (ev->status)
+			goto unlock;
+
 		/* Connection may not exist if auto-connected. Check the bredr
 		 * allowlist to see if this device is allowed to auto connect.
 		 * If link is an ACL type, create a connection class
@@ -3122,8 +3124,14 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
 		goto unlock;
 	}
 
-	if (!ev->status) {
+	if (!status) {
 		conn->handle = __le16_to_cpu(ev->handle);
+		if (conn->handle > HCI_CONN_HANDLE_MAX) {
+			bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
+				   conn->handle, HCI_CONN_HANDLE_MAX);
+			status = HCI_ERROR_INVALID_PARAMETERS;
+			goto done;
+		}
 
 		if (conn->type == ACL_LINK) {
 			conn->state = BT_CONFIG;
@@ -3164,19 +3172,14 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
 			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
 				     &cp);
 		}
-	} else {
-		conn->state = BT_CLOSED;
-		if (conn->type == ACL_LINK)
-			mgmt_connect_failed(hdev, &conn->dst, conn->type,
-					    conn->dst_type, ev->status);
 	}
 
 	if (conn->type == ACL_LINK)
 		hci_sco_setup(conn, ev->status);
 
-	if (ev->status) {
-		hci_connect_cfm(conn, ev->status);
-		hci_conn_del(conn);
+done:
+	if (status) {
+		hci_conn_failed(conn, status);
 	} else if (ev->link_type == SCO_LINK) {
 		switch (conn->setting & SCO_AIRMODE_MASK) {
 		case SCO_AIRMODE_CVSD:
@@ -3185,7 +3188,7 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
 			break;
 		}
 
-		hci_connect_cfm(conn, ev->status);
+		hci_connect_cfm(conn, status);
 	}
 
 unlock:
@@ -4676,6 +4679,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
 {
 	struct hci_ev_sync_conn_complete *ev = data;
 	struct hci_conn *conn;
+	u8 status = ev->status;
 
 	switch (ev->link_type) {
 	case SCO_LINK:
@@ -4690,12 +4694,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
 		return;
 	}
 
-	if (__le16_to_cpu(ev->handle) > HCI_CONN_HANDLE_MAX) {
-		bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete for invalid handle");
-		return;
-	}
-
-	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", status);
 
 	hci_dev_lock(hdev);
 
@@ -4729,9 +4728,17 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
 		goto unlock;
 	}
 
-	switch (ev->status) {
+	switch (status) {
 	case 0x00:
 		conn->handle = __le16_to_cpu(ev->handle);
+		if (conn->handle > HCI_CONN_HANDLE_MAX) {
+			bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
+				   conn->handle, HCI_CONN_HANDLE_MAX);
+			status = HCI_ERROR_INVALID_PARAMETERS;
+			conn->state = BT_CLOSED;
+			break;
+		}
 
 		conn->state  = BT_CONNECTED;
 		conn->type   = ev->link_type;
@@ -4775,8 +4782,8 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
 		}
 	}
 
-	hci_connect_cfm(conn, ev->status);
-	if (ev->status)
+	hci_connect_cfm(conn, status);
+	if (status)
 		hci_conn_del(conn);
 
 unlock:
@@ -5527,11 +5534,6 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
 	struct smp_irk *irk;
 	u8 addr_type;
 
-	if (handle > HCI_CONN_HANDLE_MAX) {
-		bt_dev_err(hdev, "Ignoring HCI_LE_Connection_Complete for invalid handle");
-		return;
-	}
-
 	hci_dev_lock(hdev);
 
 	/* All controllers implicitly stop advertising in the event of a
@@ -5541,6 +5543,12 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
 
 	conn = hci_lookup_le_connect(hdev);
 	if (!conn) {
+		/* In case of error status and there is no connection pending
+		 * just unlock as there is nothing to cleanup.
+		 */
+		if (status)
+			goto unlock;
+
 		conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
 		if (!conn) {
 			bt_dev_err(hdev, "no memory for new connection");
@@ -5603,8 +5611,14 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
 
 	conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
 
+	if (handle > HCI_CONN_HANDLE_MAX) {
+		bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle,
+			   HCI_CONN_HANDLE_MAX);
+		status = HCI_ERROR_INVALID_PARAMETERS;
+	}
+
 	if (status) {
-		hci_le_conn_failed(conn, status);
+		hci_conn_failed(conn, status);
 		goto unlock;
 	}


@@ -4408,12 +4408,21 @@ static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
 static int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
 			       u8 reason)
 {
+	int err;
+
 	switch (conn->state) {
 	case BT_CONNECTED:
 	case BT_CONFIG:
 		return hci_disconnect_sync(hdev, conn, reason);
 	case BT_CONNECT:
-		return hci_connect_cancel_sync(hdev, conn);
+		err = hci_connect_cancel_sync(hdev, conn);
+		/* Cleanup hci_conn object if it cannot be cancelled as it
+		 * likely means the controller and host stack are out of sync.
+		 */
+		if (err)
+			hci_conn_failed(conn, err);
+		return err;
 	case BT_CONNECT2:
 		return hci_reject_conn_sync(hdev, conn, reason);
 	default:


@@ -108,6 +108,7 @@ struct xdp_test_data {
 	struct page_pool *pp;
 	struct xdp_frame **frames;
 	struct sk_buff **skbs;
+	struct xdp_mem_info mem;
 	u32 batch_size;
 	u32 frame_cnt;
 };
@@ -147,7 +148,6 @@ static void xdp_test_run_init_page(struct page *page, void *arg)
 
 static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
 {
-	struct xdp_mem_info mem = {};
 	struct page_pool *pp;
 	int err = -ENOMEM;
 	struct page_pool_params pp_params = {
@@ -174,7 +174,7 @@ static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_c
 	}
 
 	/* will copy 'mem.id' into pp->xdp_mem_id */
-	err = xdp_reg_mem_model(&mem, MEM_TYPE_PAGE_POOL, pp);
+	err = xdp_reg_mem_model(&xdp->mem, MEM_TYPE_PAGE_POOL, pp);
 	if (err)
 		goto err_mmodel;
 
@@ -202,6 +202,7 @@ err_skbs:
 
 static void xdp_test_run_teardown(struct xdp_test_data *xdp)
 {
+	xdp_unreg_mem_model(&xdp->mem);
 	page_pool_destroy(xdp->pp);
 	kfree(xdp->frames);
 	kfree(xdp->skbs);


@@ -353,6 +353,8 @@ static int br_switchdev_vlan_attr_replay(struct net_device *br_dev,
 	attr.orig_dev = br_dev;
 
 	vg = br_vlan_group(br);
+	if (!vg)
+		return 0;
 
 	list_for_each_entry(v, &vg->vlan_list, vlist) {
 		if (v->msti) {


@@ -10304,7 +10304,7 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
 }
 EXPORT_SYMBOL(netdev_stats_to_stats64);
 
-struct net_device_core_stats *netdev_core_stats_alloc(struct net_device *dev)
+struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev)
 {
 	struct net_device_core_stats __percpu *p;
 
@@ -10315,11 +10315,7 @@ struct net_device_core_stats *netdev_core_stats_alloc(struct net_device *dev)
 		free_percpu(p);
 
 	/* This READ_ONCE() pairs with the cmpxchg() above */
-	p = READ_ONCE(dev->core_stats);
-	if (!p)
-		return NULL;
-
-	return this_cpu_ptr(p);
+	return READ_ONCE(dev->core_stats);
 }
 EXPORT_SYMBOL(netdev_core_stats_alloc);
 
@@ -10356,9 +10352,9 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
 
 		for_each_possible_cpu(i) {
 			core_stats = per_cpu_ptr(p, i);
-			storage->rx_dropped += local_read(&core_stats->rx_dropped);
-			storage->tx_dropped += local_read(&core_stats->tx_dropped);
-			storage->rx_nohandler += local_read(&core_stats->rx_nohandler);
+			storage->rx_dropped += READ_ONCE(core_stats->rx_dropped);
+			storage->tx_dropped += READ_ONCE(core_stats->tx_dropped);
+			storage->rx_nohandler += READ_ONCE(core_stats->rx_nohandler);
 		}
 	}
 	return storage;
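
With netdev_core_stats_alloc() now returning the __percpu pointer itself, callers can bump a counter with this_cpu_inc() instead of resolving a per-CPU slot up front, which is what broke on preempt-rt. A kernel-context sketch of that caller pattern (helper name illustrative, not part of this diff):

static inline void dev_core_stats_rx_dropped_inc(struct net_device *dev)
{
	struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats);

	if (!p)
		p = netdev_core_stats_alloc(dev);	/* lazily allocate per-CPU stats */
	if (p)
		this_cpu_inc(p->rx_dropped);		/* preemption-safe per-CPU increment */
}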


@@ -159,10 +159,8 @@ static int bpf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 	return dst->lwtstate->orig_output(net, sk, skb);
 }
 
-static int xmit_check_hhlen(struct sk_buff *skb)
+static int xmit_check_hhlen(struct sk_buff *skb, int hh_len)
 {
-	int hh_len = skb_dst(skb)->dev->hard_header_len;
-
 	if (skb_headroom(skb) < hh_len) {
 		int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
@@ -274,6 +272,7 @@ static int bpf_xmit(struct sk_buff *skb)
 	bpf = bpf_lwt_lwtunnel(dst->lwtstate);
 	if (bpf->xmit.prog) {
+		int hh_len = dst->dev->hard_header_len;
 		__be16 proto = skb->protocol;
 		int ret;
 
@@ -291,7 +290,7 @@ static int bpf_xmit(struct sk_buff *skb)
 		/* If the header was expanded, headroom might be too
 		 * small for L2 header to come, expand as needed.
 		 */
-		ret = xmit_check_hhlen(skb);
+		ret = xmit_check_hhlen(skb, hh_len);
 		if (unlikely(ret))
 			return ret;


@@ -1620,8 +1620,10 @@ int dsa_port_link_register_of(struct dsa_port *dp)
 		if (ds->ops->phylink_mac_link_down)
 			ds->ops->phylink_mac_link_down(ds, port,
 				MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
+		of_node_put(phy_np);
 		return dsa_port_phylink_register(dp);
 	}
 
+	of_node_put(phy_np);
 	return 0;
 }


@@ -285,7 +285,7 @@ static void dsa_port_manage_cpu_flood(struct dsa_port *dp)
 		if (other_dp->slave->flags & IFF_ALLMULTI)
 			flags.val |= BR_MCAST_FLOOD;
 		if (other_dp->slave->flags & IFF_PROMISC)
-			flags.val |= BR_FLOOD;
+			flags.val |= BR_FLOOD | BR_MCAST_FLOOD;
 	}
 
 	err = dsa_port_pre_bridge_flags(dp, flags, NULL);


@@ -459,14 +459,12 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
 			__be16 proto)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
-
-	if (tunnel->parms.o_flags & TUNNEL_SEQ)
-		tunnel->o_seqno++;
+	__be16 flags = tunnel->parms.o_flags;
 
 	/* Push GRE header. */
 	gre_build_header(skb, tunnel->tun_hlen,
-			 tunnel->parms.o_flags, proto, tunnel->parms.o_key,
-			 htonl(tunnel->o_seqno));
+			 flags, proto, tunnel->parms.o_key,
+			 (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
 
 	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
 }
@@ -504,7 +502,7 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
 		 (TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
 	gre_build_header(skb, tunnel_hlen, flags, proto,
 			 tunnel_id_to_key32(tun_info->key.tun_id),
-			 (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0);
+			 (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
 
 	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
 
@@ -581,7 +579,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	gre_build_header(skb, 8, TUNNEL_SEQ,
-			 proto, 0, htonl(tunnel->o_seqno++));
+			 proto, 0, htonl(atomic_fetch_inc(&tunnel->o_seqno)));
 
 	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
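
The switch to atomic_fetch_inc() matters because a collect_md tunnel is shared by every transmitting CPU, so a plain o_seqno++ can lose increments and emit duplicate GRE sequence numbers. A small user-space model of the same pattern (illustrative only):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint o_seqno;	/* shared, like the collect_md tunnel's counter */

static void *xmit(void *arg)
{
	for (int i = 0; i < 100000; i++)
		(void)atomic_fetch_add_explicit(&o_seqno, 1, memory_order_relaxed);
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, xmit, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	/* A plain (non-atomic) increment would typically print less than 400000. */
	printf("%u\n", atomic_load(&o_seqno));
	return 0;
}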


@@ -281,6 +281,7 @@ bool cookie_ecn_ok(const struct tcp_options_received *tcp_opt,
 EXPORT_SYMBOL(cookie_ecn_ok);
 
 struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
+					    const struct tcp_request_sock_ops *af_ops,
 					    struct sock *sk,
 					    struct sk_buff *skb)
 {
@@ -297,6 +298,10 @@ struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
 		return NULL;
 
 	treq = tcp_rsk(req);
+
+	/* treq->af_specific might be used to perform TCP_MD5 lookup */
+	treq->af_specific = af_ops;
+
 	treq->syn_tos = TCP_SKB_CB(skb)->ip_dsfield;
 #if IS_ENABLED(CONFIG_MPTCP)
 	treq->is_mptcp = sk_is_mptcp(sk);
@@ -364,7 +369,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
 		goto out;
 
 	ret = NULL;
-	req = cookie_tcp_reqsk_alloc(&tcp_request_sock_ops, sk, skb);
+	req = cookie_tcp_reqsk_alloc(&tcp_request_sock_ops,
+				     &tcp_request_sock_ipv4_ops, sk, skb);
 	if (!req)
 		goto out;


@@ -3867,7 +3867,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 			tcp_process_tlp_ack(sk, ack, flag);
 
 	if (tcp_ack_is_dubious(sk, flag)) {
-		if (!(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP))) {
+		if (!(flag & (FLAG_SND_UNA_ADVANCED |
+			      FLAG_NOT_DUP | FLAG_DSACKING_ACK))) {
 			num_dupack = 1;
 			/* Consider if pure acks were aggregated in tcp_add_backlog() */
 			if (!(flag & FLAG_DATA))
@@ -5454,7 +5455,17 @@ static void tcp_new_space(struct sock *sk)
 	INDIRECT_CALL_1(sk->sk_write_space, sk_stream_write_space, sk);
 }
 
-static void tcp_check_space(struct sock *sk)
+/* Caller made space either from:
+ * 1) Freeing skbs in rtx queues (after tp->snd_una has advanced)
+ * 2) Sent skbs from output queue (and thus advancing tp->snd_nxt)
+ *
+ * We might be able to generate EPOLLOUT to the application if:
+ * 1) Space consumed in output/rtx queues is below sk->sk_sndbuf/2
+ * 2) notsent amount (tp->write_seq - tp->snd_nxt) became
+ *    small enough that tcp_stream_memory_free() decides it
+ *    is time to generate EPOLLOUT.
+ */
+void tcp_check_space(struct sock *sk)
 {
 	/* pairs with tcp_poll() */
 	smp_mb();
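
The xmit stall this addresses is visible from user space: with TCP_NOTSENT_LOWAT set, an application parks in epoll_wait() for EPOLLOUT and relies on the kernel re-checking writability once snd_nxt advances. A minimal sketch of that usage (error handling omitted; fd is assumed to be a connected TCP socket):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/epoll.h>

static void wait_writable(int epfd, int fd)
{
	int lowat = 16 * 1024;		/* wake when unsent data drops below 16 KiB */
	struct epoll_event ev = { .events = EPOLLOUT, .data.fd = fd };

	setsockopt(fd, IPPROTO_TCP, TCP_NOTSENT_LOWAT, &lowat, sizeof(lowat));
	epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);
	epoll_wait(epfd, &ev, 1, -1);	/* could stall here before this fix */
}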


@@ -531,7 +531,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
 	newtp->tsoffset = treq->ts_off;
 #ifdef CONFIG_TCP_MD5SIG
 	newtp->md5sig_info = NULL;	/*XXX*/
-	if (newtp->af_specific->md5_lookup(sk, newsk))
+	if (treq->af_specific->req_md5_lookup(sk, req_to_sk(req)))
 		newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
 #endif
 	if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)


@@ -82,6 +82,7 @@ static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
 	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
 		      tcp_skb_pcount(skb));
+	tcp_check_space(sk);
 }
 
 /* SND.NXT, if window was not shrunk or the amount of shrunk was less than one


@@ -74,27 +74,32 @@ void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb)
  *
  * If an ACK (s)acks multiple skbs (e.g., stretched-acks), this function is
  * called multiple times. We favor the information from the most recently
- * sent skb, i.e., the skb with the highest prior_delivered count.
+ * sent skb, i.e., the skb with the most recently sent time and the highest
+ * sequence.
  */
 void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
 			    struct rate_sample *rs)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
+	u64 tx_tstamp;
 
 	if (!scb->tx.delivered_mstamp)
 		return;
 
+	tx_tstamp = tcp_skb_timestamp_us(skb);
 	if (!rs->prior_delivered ||
-	    after(scb->tx.delivered, rs->prior_delivered)) {
+	    tcp_skb_sent_after(tx_tstamp, tp->first_tx_mstamp,
+			       scb->end_seq, rs->last_end_seq)) {
 		rs->prior_delivered_ce  = scb->tx.delivered_ce;
 		rs->prior_delivered  = scb->tx.delivered;
 		rs->prior_mstamp     = scb->tx.delivered_mstamp;
 		rs->is_app_limited   = scb->tx.is_app_limited;
 		rs->is_retrans	     = scb->sacked & TCPCB_RETRANS;
+		rs->last_end_seq     = scb->end_seq;
 
 		/* Record send time of most recently ACKed packet: */
-		tp->first_tx_mstamp  = tcp_skb_timestamp_us(skb);
+		tp->first_tx_mstamp  = tx_tstamp;
 		/* Find the duration of the "send phase" of this window: */
 		rs->interval_us = tcp_stamp_us_delta(tp->first_tx_mstamp,
 						     scb->tx.first_tx_mstamp);


@@ -724,6 +724,7 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
 {
 	struct ip6_tnl *tunnel = netdev_priv(dev);
 	__be16 protocol;
+	__be16 flags;
 
 	if (dev->type == ARPHRD_ETHER)
 		IPCB(skb)->flags = 0;
@@ -739,7 +740,6 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
 	if (tunnel->parms.collect_md) {
 		struct ip_tunnel_info *tun_info;
 		const struct ip_tunnel_key *key;
-		__be16 flags;
 		int tun_hlen;
 
 		tun_info = skb_tunnel_info_txcheck(skb);
@@ -766,19 +766,19 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
 		gre_build_header(skb, tun_hlen,
 				 flags, protocol,
 				 tunnel_id_to_key32(tun_info->key.tun_id),
-				 (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++)
+				 (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno))
 						      : 0);
 
 	} else {
-		if (tunnel->parms.o_flags & TUNNEL_SEQ)
-			tunnel->o_seqno++;
-
 		if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
 			return -ENOMEM;
 
-		gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
+		flags = tunnel->parms.o_flags;
+
+		gre_build_header(skb, tunnel->tun_hlen, flags,
 				 protocol, tunnel->parms.o_key,
-				 htonl(tunnel->o_seqno));
+				 (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno))
+						      : 0);
 	}
 
 	return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
@@ -1056,7 +1056,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
 	/* Push GRE header. */
 	proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN)
 					   : htons(ETH_P_ERSPAN2);
-	gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++));
+	gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(atomic_fetch_inc(&t->o_seqno)));
 
 	/* TooBig packet may have updated dst->dev's mtu */
 	if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)


@@ -24,14 +24,13 @@ int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct sk_buff *skb)
 {
 	const struct ipv6hdr *iph = ipv6_hdr(skb);
 	struct sock *sk = sk_to_full_sk(sk_partial);
+	struct net_device *dev = skb_dst(skb)->dev;
 	struct flow_keys flkeys;
 	unsigned int hh_len;
 	struct dst_entry *dst;
 	int strict = (ipv6_addr_type(&iph->daddr) &
 		      (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
 	struct flowi6 fl6 = {
-		.flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
-			strict ? skb_dst(skb)->dev->ifindex : 0,
 		.flowi6_mark = skb->mark,
 		.flowi6_uid = sock_net_uid(net, sk),
 		.daddr = iph->daddr,
@@ -39,6 +38,13 @@ int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct sk_buff *skb)
 	};
 	int err;
 
+	if (sk && sk->sk_bound_dev_if)
+		fl6.flowi6_oif = sk->sk_bound_dev_if;
+	else if (strict)
+		fl6.flowi6_oif = dev->ifindex;
+	else
+		fl6.flowi6_oif = l3mdev_master_ifindex(dev);
+
 	fib6_rules_early_flow_dissect(net, skb, &fl6, &flkeys);
 	dst = ip6_route_output(net, sk, &fl6);
 	err = dst->error;


@@ -170,7 +170,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 		goto out;
 
 	ret = NULL;
-	req = cookie_tcp_reqsk_alloc(&tcp6_request_sock_ops, sk, skb);
+	req = cookie_tcp_reqsk_alloc(&tcp6_request_sock_ops,
+				     &tcp_request_sock_ipv6_ops, sk, skb);
 	if (!req)
 		goto out;


@@ -313,6 +313,7 @@ void mctp_dev_hold(struct mctp_dev *mdev)
 void mctp_dev_put(struct mctp_dev *mdev)
 {
 	if (mdev && refcount_dec_and_test(&mdev->refs)) {
+		kfree(mdev->addrs);
 		dev_put(mdev->dev);
 		kfree_rcu(mdev, rcu);
 	}
@@ -441,7 +442,6 @@ static void mctp_unregister(struct net_device *dev)
 	mctp_route_remove_dev(mdev);
 	mctp_neigh_remove_dev(mdev);
 
-	kfree(mdev->addrs);
 
 	mctp_dev_put(mdev);
 }


@@ -1495,7 +1495,7 @@ int __init ip_vs_conn_init(void)
 	pr_info("Connection hash table configured "
 		"(size=%d, memory=%ldKbytes)\n",
 		ip_vs_conn_tab_size,
-		(long)(ip_vs_conn_tab_size*sizeof(struct list_head))/1024);
+		(long)(ip_vs_conn_tab_size*sizeof(*ip_vs_conn_tab))/1024);
 	IP_VS_DBG(0, "Each connection entry needs %zd bytes at least\n",
 		  sizeof(struct ip_vs_conn));


@@ -556,24 +556,14 @@ static bool tcp_in_window(struct nf_conn *ct,
 			}
 		}
-	} else if (((state->state == TCP_CONNTRACK_SYN_SENT
-		     && dir == IP_CT_DIR_ORIGINAL)
-		    || (state->state == TCP_CONNTRACK_SYN_RECV
-			&& dir == IP_CT_DIR_REPLY))
-		   && after(end, sender->td_end)) {
+	} else if (tcph->syn &&
+		   after(end, sender->td_end) &&
+		   (state->state == TCP_CONNTRACK_SYN_SENT ||
+		    state->state == TCP_CONNTRACK_SYN_RECV)) {
 		/*
 		 * RFC 793: "if a TCP is reinitialized ... then it need
 		 * not wait at all; it must only be sure to use sequence
 		 * numbers larger than those recently used."
-		 */
-		sender->td_end =
-		sender->td_maxend = end;
-		sender->td_maxwin = (win == 0 ? 1 : win);
-
-		tcp_options(skb, dataoff, tcph, sender);
-	} else if (tcph->syn && dir == IP_CT_DIR_REPLY &&
-		   state->state == TCP_CONNTRACK_SYN_SENT) {
-		/* Retransmitted syn-ack, or syn (simultaneous open).
 		 *
 		 * Re-init state for this direction, just like for the first
 		 * syn(-ack) reply, it might differ in seq, ack or tcp options.
@@ -581,7 +571,8 @@ static bool tcp_in_window(struct nf_conn *ct,
 		tcp_init_sender(sender, receiver,
 				skb, dataoff, tcph,
 				end, win);
-		if (!tcph->ack)
+
+		if (dir == IP_CT_DIR_REPLY && !tcph->ack)
 			return true;
 	}
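
The traffic pattern behind this fix is TCP Fast Open: data rides on the SYN, and a retransmitted SYN must be allowed to re-initialise the conntrack window state. A minimal client-side sketch of that pattern (addresses and error handling omitted; MSG_FASTOPEN requires a reasonably recent glibc/kernel):

#include <stddef.h>
#include <sys/socket.h>
#include <netinet/in.h>

static void tfo_request(const struct sockaddr_in *srv, const void *req, size_t len)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	/* The payload is handed over with the connection request; the kernel
	 * carries it on the SYN when a Fast Open cookie is available.
	 */
	sendto(fd, req, len, MSG_FASTOPEN,
	       (const struct sockaddr *)srv, sizeof(*srv));
}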


@@ -823,7 +823,7 @@ static struct ctl_table nf_ct_sysctl_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_jiffies,
 	},
-#if IS_ENABLED(CONFIG_NFT_FLOW_OFFLOAD)
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
 	[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD] = {
 		.procname	= "nf_flowtable_udp_timeout",
 		.maxlen		= sizeof(unsigned int),


@@ -349,7 +349,11 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
 				*ext = &rbe->ext;
 				return -EEXIST;
 			} else {
-				p = &parent->rb_left;
+				overlap = false;
+				if (nft_rbtree_interval_end(rbe))
+					p = &parent->rb_left;
+				else
+					p = &parent->rb_right;
 			}
 		}


@@ -54,6 +54,32 @@ nft_sock_get_eval_cgroupv2(u32 *dest, struct sock *sk, const struct nft_pktinfo
 }
 #endif
 
+static struct sock *nft_socket_do_lookup(const struct nft_pktinfo *pkt)
+{
+	const struct net_device *indev = nft_in(pkt);
+	const struct sk_buff *skb = pkt->skb;
+	struct sock *sk = NULL;
+
+	if (!indev)
+		return NULL;
+
+	switch (nft_pf(pkt)) {
+	case NFPROTO_IPV4:
+		sk = nf_sk_lookup_slow_v4(nft_net(pkt), skb, indev);
+		break;
+#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
+	case NFPROTO_IPV6:
+		sk = nf_sk_lookup_slow_v6(nft_net(pkt), skb, indev);
+		break;
+#endif
+	default:
+		WARN_ON_ONCE(1);
+		break;
+	}
+
+	return sk;
+}
+
 static void nft_socket_eval(const struct nft_expr *expr,
 			    struct nft_regs *regs,
 			    const struct nft_pktinfo *pkt)
@@ -67,20 +93,7 @@ static void nft_socket_eval(const struct nft_expr *expr,
 		sk = NULL;
 
 	if (!sk)
-		switch(nft_pf(pkt)) {
-		case NFPROTO_IPV4:
-			sk = nf_sk_lookup_slow_v4(nft_net(pkt), skb, nft_in(pkt));
-			break;
-#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
-		case NFPROTO_IPV6:
-			sk = nf_sk_lookup_slow_v6(nft_net(pkt), skb, nft_in(pkt));
-			break;
-#endif
-		default:
-			WARN_ON_ONCE(1);
-			regs->verdict.code = NFT_BREAK;
-			return;
-		}
+		sk = nft_socket_do_lookup(pkt);
 
 	if (!sk) {
 		regs->verdict.code = NFT_BREAK;
@@ -224,6 +237,16 @@ static bool nft_socket_reduce(struct nft_regs_track *track,
 	return nft_expr_reduce_bitwise(track, expr);
 }
 
+static int nft_socket_validate(const struct nft_ctx *ctx,
+			       const struct nft_expr *expr,
+			       const struct nft_data **data)
+{
+	return nft_chain_validate_hooks(ctx->chain,
+					(1 << NF_INET_PRE_ROUTING) |
+					(1 << NF_INET_LOCAL_IN) |
+					(1 << NF_INET_LOCAL_OUT));
+}
+
 static struct nft_expr_type nft_socket_type;
 static const struct nft_expr_ops nft_socket_ops = {
 	.type		= &nft_socket_type,
@@ -231,6 +254,7 @@ static const struct nft_expr_ops nft_socket_ops = {
 	.eval		= nft_socket_eval,
 	.init		= nft_socket_init,
 	.dump		= nft_socket_dump,
+	.validate	= nft_socket_validate,
 	.reduce		= nft_socket_reduce,
 };


@@ -458,6 +458,10 @@ void sctp_generate_reconf_event(struct timer_list *t)
 		goto out_unlock;
 	}
 
+	/* This happens when the response arrives after the timer is triggered. */
+	if (!asoc->strreset_chunk)
+		goto out_unlock;
+
 	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
 			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_RECONF),
 			   asoc->state, asoc->ep, asoc,


@@ -243,11 +243,27 @@ struct proto smc_proto6 = {
 };
 EXPORT_SYMBOL_GPL(smc_proto6);
 
+static void smc_fback_restore_callbacks(struct smc_sock *smc)
+{
+	struct sock *clcsk = smc->clcsock->sk;
+
+	write_lock_bh(&clcsk->sk_callback_lock);
+	clcsk->sk_user_data = NULL;
+
+	smc_clcsock_restore_cb(&clcsk->sk_state_change, &smc->clcsk_state_change);
+	smc_clcsock_restore_cb(&clcsk->sk_data_ready, &smc->clcsk_data_ready);
+	smc_clcsock_restore_cb(&clcsk->sk_write_space, &smc->clcsk_write_space);
+	smc_clcsock_restore_cb(&clcsk->sk_error_report, &smc->clcsk_error_report);
+
+	write_unlock_bh(&clcsk->sk_callback_lock);
+}
+
 static void smc_restore_fallback_changes(struct smc_sock *smc)
 {
 	if (smc->clcsock->file) { /* non-accepted sockets have no file yet */
 		smc->clcsock->file->private_data = smc->sk.sk_socket;
 		smc->clcsock->file = NULL;
+		smc_fback_restore_callbacks(smc);
 	}
 }
 
@@ -373,6 +389,7 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
 	sk->sk_prot->hash(sk);
 	sk_refcnt_debug_inc(sk);
 	mutex_init(&smc->clcsock_release_lock);
+	smc_init_saved_callbacks(smc);
 
 	return sk;
 }
@@ -744,47 +761,73 @@ out:
 
 static void smc_fback_state_change(struct sock *clcsk)
 {
-	struct smc_sock *smc =
-		smc_clcsock_user_data(clcsk);
+	struct smc_sock *smc;
 
-	if (!smc)
-		return;
-	smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_state_change);
+	read_lock_bh(&clcsk->sk_callback_lock);
+	smc = smc_clcsock_user_data(clcsk);
+	if (smc)
+		smc_fback_forward_wakeup(smc, clcsk,
+					 smc->clcsk_state_change);
+	read_unlock_bh(&clcsk->sk_callback_lock);
 }
 
 static void smc_fback_data_ready(struct sock *clcsk)
 {
-	struct smc_sock *smc =
-		smc_clcsock_user_data(clcsk);
+	struct smc_sock *smc;
 
-	if (!smc)
-		return;
-	smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_data_ready);
+	read_lock_bh(&clcsk->sk_callback_lock);
+	smc = smc_clcsock_user_data(clcsk);
+	if (smc)
+		smc_fback_forward_wakeup(smc, clcsk,
+					 smc->clcsk_data_ready);
+	read_unlock_bh(&clcsk->sk_callback_lock);
 }
 
 static void smc_fback_write_space(struct sock *clcsk)
 {
-	struct smc_sock *smc =
-		smc_clcsock_user_data(clcsk);
+	struct smc_sock *smc;
 
-	if (!smc)
-		return;
-	smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_write_space);
+	read_lock_bh(&clcsk->sk_callback_lock);
+	smc = smc_clcsock_user_data(clcsk);
+	if (smc)
+		smc_fback_forward_wakeup(smc, clcsk,
+					 smc->clcsk_write_space);
+	read_unlock_bh(&clcsk->sk_callback_lock);
 }
 
 static void smc_fback_error_report(struct sock *clcsk)
 {
-	struct smc_sock *smc =
-		smc_clcsock_user_data(clcsk);
+	struct smc_sock *smc;
 
-	if (!smc)
-		return;
-	smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_error_report);
+	read_lock_bh(&clcsk->sk_callback_lock);
+	smc = smc_clcsock_user_data(clcsk);
+	if (smc)
+		smc_fback_forward_wakeup(smc, clcsk,
+					 smc->clcsk_error_report);
+	read_unlock_bh(&clcsk->sk_callback_lock);
+}
+
+static void smc_fback_replace_callbacks(struct smc_sock *smc)
+{
+	struct sock *clcsk = smc->clcsock->sk;
+
+	write_lock_bh(&clcsk->sk_callback_lock);
+	clcsk->sk_user_data = (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
+
+	smc_clcsock_replace_cb(&clcsk->sk_state_change, smc_fback_state_change,
+			       &smc->clcsk_state_change);
+	smc_clcsock_replace_cb(&clcsk->sk_data_ready, smc_fback_data_ready,
+			       &smc->clcsk_data_ready);
+	smc_clcsock_replace_cb(&clcsk->sk_write_space, smc_fback_write_space,
+			       &smc->clcsk_write_space);
+	smc_clcsock_replace_cb(&clcsk->sk_error_report, smc_fback_error_report,
+			       &smc->clcsk_error_report);
+
+	write_unlock_bh(&clcsk->sk_callback_lock);
 }
 
 static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
 {
-	struct sock *clcsk;
 	int rc = 0;
 
 	mutex_lock(&smc->clcsock_release_lock);
@@ -792,10 +835,7 @@ static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
 		rc = -EBADF;
 		goto out;
 	}
-	clcsk = smc->clcsock->sk;
 
-	if (smc->use_fallback)
-		goto out;
 	smc->use_fallback = true;
 	smc->fallback_rsn = reason_code;
 	smc_stat_fallback(smc);
@@ -810,18 +850,7 @@ static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
 		 * in smc sk->sk_wq and they should be woken up
 		 * as clcsock's wait queue is woken up.
 		 */
-		smc->clcsk_state_change = clcsk->sk_state_change;
-		smc->clcsk_data_ready = clcsk->sk_data_ready;
-		smc->clcsk_write_space = clcsk->sk_write_space;
-		smc->clcsk_error_report = clcsk->sk_error_report;
-
-		clcsk->sk_state_change = smc_fback_state_change;
-		clcsk->sk_data_ready = smc_fback_data_ready;
-		clcsk->sk_write_space = smc_fback_write_space;
-		clcsk->sk_error_report = smc_fback_error_report;
-
-		smc->clcsock->sk->sk_user_data =
-			(void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
+		smc_fback_replace_callbacks(smc);
 	}
 out:
 	mutex_unlock(&smc->clcsock_release_lock);
@@ -1475,6 +1504,8 @@ static void smc_connect_work(struct work_struct *work)
 			smc->sk.sk_state = SMC_CLOSED;
 			if (rc == -EPIPE || rc == -EAGAIN)
 				smc->sk.sk_err = EPIPE;
+			else if (rc == -ECONNREFUSED)
+				smc->sk.sk_err = ECONNREFUSED;
 			else if (signal_pending(current))
 				smc->sk.sk_err = -sock_intr_errno(timeo);
 			sock_put(&smc->sk); /* passive closing */
@@ -1594,6 +1625,19 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
 	 * function; switch it back to the original sk_data_ready function
 	 */
 	new_clcsock->sk->sk_data_ready = lsmc->clcsk_data_ready;
+
+	/* if new clcsock has also inherited the fallback-specific callback
+	 * functions, switch them back to the original ones.
+	 */
+	if (lsmc->use_fallback) {
+		if (lsmc->clcsk_state_change)
+			new_clcsock->sk->sk_state_change = lsmc->clcsk_state_change;
+		if (lsmc->clcsk_write_space)
+			new_clcsock->sk->sk_write_space = lsmc->clcsk_write_space;
+		if (lsmc->clcsk_error_report)
+			new_clcsock->sk->sk_error_report = lsmc->clcsk_error_report;
+	}
+
 	(*new_smc)->clcsock = new_clcsock;
 out:
 	return rc;
@@ -2353,17 +2397,20 @@ out:
 
 static void smc_clcsock_data_ready(struct sock *listen_clcsock)
 {
-	struct smc_sock *lsmc =
-		smc_clcsock_user_data(listen_clcsock);
+	struct smc_sock *lsmc;
 
+	read_lock_bh(&listen_clcsock->sk_callback_lock);
+	lsmc = smc_clcsock_user_data(listen_clcsock);
 	if (!lsmc)
-		return;
+		goto out;
 	lsmc->clcsk_data_ready(listen_clcsock);
 	if (lsmc->sk.sk_state == SMC_LISTEN) {
 		sock_hold(&lsmc->sk); /* sock_put in smc_tcp_listen_work() */
 		if (!queue_work(smc_tcp_ls_wq, &lsmc->tcp_listen_work))
 			sock_put(&lsmc->sk);
 	}
+out:
+	read_unlock_bh(&listen_clcsock->sk_callback_lock);
 }
 
 static int smc_listen(struct socket *sock, int backlog)
@@ -2395,10 +2442,12 @@ static int smc_listen(struct socket *sock, int backlog)
 	/* save original sk_data_ready function and establish
 	 * smc-specific sk_data_ready function
 	 */
-	smc->clcsk_data_ready = smc->clcsock->sk->sk_data_ready;
-	smc->clcsock->sk->sk_data_ready = smc_clcsock_data_ready;
+	write_lock_bh(&smc->clcsock->sk->sk_callback_lock);
 	smc->clcsock->sk->sk_user_data =
 		(void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
+	smc_clcsock_replace_cb(&smc->clcsock->sk->sk_data_ready,
+			       smc_clcsock_data_ready, &smc->clcsk_data_ready);
+	write_unlock_bh(&smc->clcsock->sk->sk_callback_lock);
 
 	/* save original ops */
 	smc->ori_af_ops = inet_csk(smc->clcsock->sk)->icsk_af_ops;
@@ -2413,7 +2462,11 @@ static int smc_listen(struct socket *sock, int backlog)
 
 	rc = kernel_listen(smc->clcsock, backlog);
 	if (rc) {
-		smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
+		write_lock_bh(&smc->clcsock->sk->sk_callback_lock);
+		smc_clcsock_restore_cb(&smc->clcsock->sk->sk_data_ready,
+				       &smc->clcsk_data_ready);
+		smc->clcsock->sk->sk_user_data = NULL;
+		write_unlock_bh(&smc->clcsock->sk->sk_callback_lock);
 		goto out;
 	}
 	sk->sk_max_ack_backlog = backlog;


@@ -288,12 +288,41 @@ static inline struct smc_sock *smc_sk(const struct sock *sk)
 	return (struct smc_sock *)sk;
 }
 
+static inline void smc_init_saved_callbacks(struct smc_sock *smc)
+{
+	smc->clcsk_state_change	= NULL;
+	smc->clcsk_data_ready	= NULL;
+	smc->clcsk_write_space	= NULL;
+	smc->clcsk_error_report	= NULL;
+}
+
 static inline struct smc_sock *smc_clcsock_user_data(const struct sock *clcsk)
 {
 	return (struct smc_sock *)
 	       ((uintptr_t)clcsk->sk_user_data & ~SK_USER_DATA_NOCOPY);
 }
 
+/* save target_cb in saved_cb, and replace target_cb with new_cb */
+static inline void smc_clcsock_replace_cb(void (**target_cb)(struct sock *),
+					  void (*new_cb)(struct sock *),
+					  void (**saved_cb)(struct sock *))
+{
+	/* only save once */
+	if (!*saved_cb)
+		*saved_cb = *target_cb;
+	*target_cb = new_cb;
+}
+
+/* restore target_cb to saved_cb, and reset saved_cb to NULL */
+static inline void smc_clcsock_restore_cb(void (**target_cb)(struct sock *),
+					  void (**saved_cb)(struct sock *))
+{
+	if (!*saved_cb)
+		return;
+
+	*target_cb = *saved_cb;
+	*saved_cb = NULL;
+}
+
 extern struct workqueue_struct	*smc_hs_wq;	/* wq for handshake work */
 extern struct workqueue_struct	*smc_close_wq;	/* wq for close work */
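
The "only save once" rule in smc_clcsock_replace_cb() is what keeps a second replacement (listen first, fallback later) from clobbering the original callback, and makes restore a no-op when nothing was saved. A small user-space model of that contract (illustrative only):

#include <assert.h>
#include <stddef.h>

typedef void (*cb_t)(void);

static void orig_cb(void) { }
static void repl_a(void) { }
static void repl_b(void) { }

static void replace_cb(cb_t *target, cb_t new_cb, cb_t *saved)
{
	if (!*saved)
		*saved = *target;	/* only save once */
	*target = new_cb;
}

static void restore_cb(cb_t *target, cb_t *saved)
{
	if (!*saved)
		return;
	*target = *saved;
	*saved = NULL;
}

int main(void)
{
	cb_t cb = orig_cb, saved = NULL;

	replace_cb(&cb, repl_a, &saved);
	replace_cb(&cb, repl_b, &saved);	/* the original stays saved */
	restore_cb(&cb, &saved);
	assert(cb == orig_cb && saved == NULL);
	return 0;
}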


@@ -214,8 +214,11 @@ again:
 			sk->sk_state = SMC_CLOSED;
 			sk->sk_state_change(sk); /* wake up accept */
 			if (smc->clcsock && smc->clcsock->sk) {
-				smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
+				write_lock_bh(&smc->clcsock->sk->sk_callback_lock);
+				smc_clcsock_restore_cb(&smc->clcsock->sk->sk_data_ready,
+						       &smc->clcsk_data_ready);
 				smc->clcsock->sk->sk_user_data = NULL;
+				write_unlock_bh(&smc->clcsock->sk->sk_callback_lock);
 				rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
 			}
 			smc_close_cleanup_listen(sk);


@@ -483,11 +483,13 @@ handle_error:
 		copy = min_t(size_t, size, (pfrag->size - pfrag->offset));
 		copy = min_t(size_t, copy, (max_open_record_len - record->len));
 
-		rc = tls_device_copy_data(page_address(pfrag->page) +
-					  pfrag->offset, copy, msg_iter);
-		if (rc)
-			goto handle_error;
-		tls_append_frag(record, pfrag, copy);
+		if (copy) {
+			rc = tls_device_copy_data(page_address(pfrag->page) +
+						  pfrag->offset, copy, msg_iter);
+			if (rc)
+				goto handle_error;
+			tls_append_frag(record, pfrag, copy);
+		}
 
 		size -= copy;
 		if (!size) {


@@ -639,7 +639,7 @@ static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len
 	if (sk_can_busy_loop(sk))
 		sk_busy_loop(sk, 1); /* only support non-blocking sockets */
 
-	if (xsk_no_wakeup(sk))
+	if (xs->zc && xsk_no_wakeup(sk))
 		return 0;
 
 	pool = xs->pool;
@@ -967,6 +967,19 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 
 			xp_get_pool(umem_xs->pool);
 			xs->pool = umem_xs->pool;
+
+			/* If underlying shared umem was created without Tx
+			 * ring, allocate Tx descs array that Tx batching API
+			 * utilizes
+			 */
+			if (xs->tx && !xs->pool->tx_descs) {
+				err = xp_alloc_tx_descs(xs->pool, xs);
+				if (err) {
+					xp_put_pool(xs->pool);
+					sockfd_put(sock);
+					goto out_unlock;
+				}
+			}
 		}
 
 		xdp_get_umem(umem_xs->umem);


@@ -42,6 +42,16 @@ void xp_destroy(struct xsk_buff_pool *pool)
 	kvfree(pool);
 }
 
+int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs)
+{
+	pool->tx_descs = kvcalloc(xs->tx->nentries, sizeof(*pool->tx_descs),
+				  GFP_KERNEL);
+	if (!pool->tx_descs)
+		return -ENOMEM;
+
+	return 0;
+}
+
 struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
 						struct xdp_umem *umem)
 {
@@ -59,11 +69,9 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
 	if (!pool->heads)
 		goto out;
 
-	if (xs->tx) {
-		pool->tx_descs = kcalloc(xs->tx->nentries, sizeof(*pool->tx_descs), GFP_KERNEL);
-		if (!pool->tx_descs)
+	if (xs->tx)
+		if (xp_alloc_tx_descs(pool, xs))
 			goto out;
-	}
 
 	pool->chunk_mask = ~((u64)umem->chunk_size - 1);
 	pool->addrs_cnt = umem->size;


@@ -1,3 +1,4 @@
+CONFIG_ACPI=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_CMDLINE_BOOL=y


@@ -1,3 +1,4 @@
+CONFIG_ACPI=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_CMDLINE_BOOL=y