Networking fixes for 5.18-rc5, including fixes from bluetooth, bpf and netfilter.

Current release - new code bugs:
 - bridge: switchdev: check br_vlan_group() return value
 - use this_cpu_inc() to increment net->core_stats, fix preempt-rt

Previous releases - regressions:
 - eth: stmmac: fix write to sgmii_adapter_base

Previous releases - always broken:
 - netfilter: nf_conntrack_tcp: re-init for syn packets only, resolving issues with TCP fastopen
 - tcp: md5: fix incorrect tcp_header_len for incoming connections
 - tcp: fix F-RTO may not work correctly when receiving DSACK
 - tcp: ensure use of most recently sent skb when filling rate samples
 - tcp: fix potential xmit stalls caused by TCP_NOTSENT_LOWAT
 - virtio_net: fix wrong buf address calculation when using xdp
 - xsk: fix forwarding when combining copy mode with busy poll
 - xsk: fix possible crash when multiple sockets are created
 - bpf: lwt: fix crash when using bpf_skb_set_tunnel_key() from bpf_xmit lwt hook
 - sctp: null-check asoc strreset_chunk in sctp_generate_reconf_event
 - wireguard: device: check for metadata_dst with skb_valid_dst()
 - netfilter: update ip6_route_me_harder to consider L3 domain
 - gre: make o_seqno start from 0 in native mode
 - gre: switch o_seqno to atomic to prevent races in collect_md mode

Misc:
 - add Eric Dumazet to networking maintainers
 - dt: dsa: realtek: remove realtek,rtl8367s string
 - netfilter: flowtable: Remove the empty file

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

Merge tag 'net-5.18-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Including fixes from bluetooth, bpf and netfilter."

* tag 'net-5.18-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (65 commits)
  tcp: fix F-RTO may not work correctly when receiving DSACK
  Revert "ibmvnic: Add ethtool private flag for driver-defined queue limits"
  net: enetc: allow tc-etf offload even with NETIF_F_CSUM_MASK
  ixgbe: ensure IPsec VF<->PF compatibility
  MAINTAINERS: Update BNXT entry with firmware files
  netfilter: nft_socket: only do sk lookups when indev is available
  net: fec: add missing of_node_put() in fec_enet_init_stop_mode()
  bnx2x: fix napi API usage sequence
  tls: Skip tls_append_frag on zero copy size
  Add Eric Dumazet to networking maintainers
  netfilter: conntrack: fix udp offload timeout sysctl
  netfilter: nf_conntrack_tcp: re-init for syn packets only
  net: dsa: lantiq_gswip: Don't set GSWIP_MII_CFG_RMII_CLK
  net: Use this_cpu_inc() to increment net->core_stats
  Bluetooth: hci_sync: Cleanup hci_conn if it cannot be aborted
  Bluetooth: hci_event: Fix creating hci_conn object on error status
  Bluetooth: hci_event: Fix checking for invalid handle on error status
  ice: fix use-after-free when deinitializing mailbox snapshot
  ice: wait 5 s for EMP reset after firmware flash
  ice: Protect vf_state check by cfg_lock in ice_vc_process_vf_msg()
  ...
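As a rough illustration of the net->core_stats change listed above (a minimal sketch, not taken from the patches below): the old code fetched a per-CPU pointer under preempt_disable() and bumped a local_t, while the fix keeps the __percpu pointer and lets this_cpu_inc() pick and increment the current CPU's copy in one step, which is also well-behaved on preempt-rt. The struct, variable and helper names here (example_core_stats, example_stats, example_stats_init, example_rx_dropped_inc) are hypothetical.

#include <linux/errno.h>
#include <linux/percpu.h>

/* Hypothetical stand-in for struct net_device_core_stats. */
struct example_core_stats {
	unsigned long rx_dropped;
	unsigned long tx_dropped;
};

static struct example_core_stats __percpu *example_stats;

/* Hypothetical setup: allocate one counter block per CPU. */
static int example_stats_init(void)
{
	example_stats = alloc_percpu(struct example_core_stats);
	return example_stats ? 0 : -ENOMEM;
}

/* Bump this CPU's copy; no preempt_disable()/local_inc() pair needed. */
static void example_rx_dropped_inc(void)
{
	this_cpu_inc(example_stats->rx_dropped);
}

The real change in include/linux/netdevice.h further down in the diff follows the same shape, with the counters kept as plain unsigned long per-CPU fields.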
commit 249aca0d3d
@@ -27,32 +27,25 @@ description:
The realtek-mdio driver is an MDIO driver and it must be inserted inside
an MDIO node.

The compatible string is only used to identify which (silicon) family the
switch belongs to. Roughly speaking, a family is any set of Realtek switches
whose chip identification register(s) have a common location and semantics.
The different models in a given family can be automatically disambiguated by
parsing the chip identification register(s) according to the given family,
avoiding the need for a unique compatible string for each model.

properties:
compatible:
enum:
- realtek,rtl8365mb
- realtek,rtl8366
- realtek,rtl8366rb
- realtek,rtl8366s
- realtek,rtl8367
- realtek,rtl8367b
- realtek,rtl8367rb
- realtek,rtl8367s
- realtek,rtl8368s
- realtek,rtl8369
- realtek,rtl8370
description: |
realtek,rtl8365mb: 4+1 ports
realtek,rtl8366: 5+1 ports
realtek,rtl8366rb: 5+1 ports
realtek,rtl8366s: 5+1 ports
realtek,rtl8367:
realtek,rtl8367b:
realtek,rtl8367rb: 5+2 ports
realtek,rtl8367s: 5+2 ports
realtek,rtl8368s: 8 ports
realtek,rtl8369: 8+1 ports
realtek,rtl8370: 8+2 ports
realtek,rtl8365mb:
Use with models RTL8363NB, RTL8363NB-VB, RTL8363SC, RTL8363SC-VB,
RTL8364NB, RTL8364NB-VB, RTL8365MB, RTL8366SC, RTL8367RB-VB, RTL8367S,
RTL8367SB, RTL8370MB, RTL8310SR
realtek,rtl8366rb:
Use with models RTL8366RB, RTL8366S

mdc-gpios:
description: GPIO line for the MDC clock line.
@@ -335,7 +328,7 @@ examples:
#size-cells = <0>;

switch@29 {
compatible = "realtek,rtl8367s";
compatible = "realtek,rtl8365mb";
reg = <29>;

reset-gpios = <&gpio2 20 GPIO_ACTIVE_LOW>;
@@ -3913,7 +3913,9 @@ BROADCOM BNXT_EN 50 GIGABIT ETHERNET DRIVER
M: Michael Chan <michael.chan@broadcom.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/firmware/broadcom/tee_bnxt_fw.c
F: drivers/net/ethernet/broadcom/bnxt/
F: include/linux/firmware/broadcom/tee_bnxt_fw.h

BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER
M: Arend van Spriel <aspriel@gmail.com>
@@ -13623,6 +13625,7 @@ F: net/core/drop_monitor.c

NETWORKING DRIVERS
M: "David S. Miller" <davem@davemloft.net>
M: Eric Dumazet <edumazet@google.com>
M: Jakub Kicinski <kuba@kernel.org>
M: Paolo Abeni <pabeni@redhat.com>
L: netdev@vger.kernel.org
@@ -13670,6 +13673,7 @@ F: tools/testing/selftests/drivers/net/dsa/

NETWORKING [GENERAL]
M: "David S. Miller" <davem@davemloft.net>
M: Eric Dumazet <edumazet@google.com>
M: Jakub Kicinski <kuba@kernel.org>
M: Paolo Abeni <pabeni@redhat.com>
L: netdev@vger.kernel.org
@@ -1681,9 +1681,6 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
break;
case PHY_INTERFACE_MODE_RMII:
miicfg |= GSWIP_MII_CFG_MODE_RMIIM;

/* Configure the RMII clock as output: */
miicfg |= GSWIP_MII_CFG_RMII_CLK;
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
@@ -40,8 +40,9 @@ int mv88e6xxx_port_hidden_wait(struct mv88e6xxx_chip *chip)
{
int bit = __bf_shf(MV88E6XXX_PORT_RESERVED_1A_BUSY);

return mv88e6xxx_wait_bit(chip, MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT,
MV88E6XXX_PORT_RESERVED_1A, bit, 0);
return mv88e6xxx_port_wait_bit(chip,
MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT,
MV88E6XXX_PORT_RESERVED_1A, bit, 0);
}

int mv88e6xxx_port_hidden_read(struct mv88e6xxx_chip *chip, int block, int port,
@@ -267,7 +267,6 @@ static const struct of_device_id realtek_mdio_of_match[] = {
#endif
#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
{ .compatible = "realtek,rtl8365mb", .data = &rtl8365mb_variant, },
{ .compatible = "realtek,rtl8367s", .data = &rtl8365mb_variant, },
#endif
{ /* sentinel */ },
};
@@ -551,10 +551,6 @@ static const struct of_device_id realtek_smi_of_match[] = {
.compatible = "realtek,rtl8365mb",
.data = &rtl8365mb_variant,
},
{
.compatible = "realtek,rtl8367s",
.data = &rtl8365mb_variant,
},
#endif
{ /* sentinel */ },
};
@@ -14153,10 +14153,6 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)

/* Stop Tx */
bnx2x_tx_disable(bp);
/* Delete all NAPI objects */
bnx2x_del_all_napi(bp);
if (CNIC_LOADED(bp))
bnx2x_del_all_napi_cnic(bp);
netdev_reset_tc(bp->dev);

del_timer_sync(&bp->timer);
@@ -14261,6 +14257,11 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
bnx2x_drain_tx_queues(bp);
bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
bnx2x_netif_stop(bp, 1);
bnx2x_del_all_napi(bp);

if (CNIC_LOADED(bp))
bnx2x_del_all_napi_cnic(bp);

bnx2x_free_irq(bp);

/* Report UNLOAD_DONE to MCP */
@ -2035,6 +2035,11 @@ static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
|
||||
return skb;
|
||||
}
|
||||
|
||||
static void bcmgenet_hide_tsb(struct sk_buff *skb)
|
||||
{
|
||||
__skb_pull(skb, sizeof(struct status_64));
|
||||
}
|
||||
|
||||
static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
{
|
||||
struct bcmgenet_priv *priv = netdev_priv(dev);
|
||||
@ -2141,6 +2146,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
}
|
||||
|
||||
GENET_CB(skb)->last_cb = tx_cb_ptr;
|
||||
|
||||
bcmgenet_hide_tsb(skb);
|
||||
skb_tx_timestamp(skb);
|
||||
|
||||
/* Decrement total BD count and advance our write pointer */
|
||||
|
@ -297,10 +297,6 @@ int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
|
||||
if (tc < 0 || tc >= priv->num_tx_rings)
|
||||
return -EINVAL;
|
||||
|
||||
/* Do not support TXSTART and TX CSUM offload simutaniously */
|
||||
if (ndev->features & NETIF_F_CSUM_MASK)
|
||||
return -EBUSY;
|
||||
|
||||
/* TSD and Qbv are mutually exclusive in hardware */
|
||||
if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE)
|
||||
return -EBUSY;
|
||||
|
@ -3731,7 +3731,7 @@ static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
|
||||
ARRAY_SIZE(out_val));
|
||||
if (ret) {
|
||||
dev_dbg(&fep->pdev->dev, "no stop mode property\n");
|
||||
return ret;
|
||||
goto out;
|
||||
}
|
||||
|
||||
fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);
|
||||
|
@ -1065,19 +1065,23 @@ int hns_mac_init(struct dsaf_device *dsaf_dev)
|
||||
device_for_each_child_node(dsaf_dev->dev, child) {
|
||||
ret = fwnode_property_read_u32(child, "reg", &port_id);
|
||||
if (ret) {
|
||||
fwnode_handle_put(child);
|
||||
dev_err(dsaf_dev->dev,
|
||||
"get reg fail, ret=%d!\n", ret);
|
||||
return ret;
|
||||
}
|
||||
if (port_id >= max_port_num) {
|
||||
fwnode_handle_put(child);
|
||||
dev_err(dsaf_dev->dev,
|
||||
"reg(%u) out of range!\n", port_id);
|
||||
return -EINVAL;
|
||||
}
|
||||
mac_cb = devm_kzalloc(dsaf_dev->dev, sizeof(*mac_cb),
|
||||
GFP_KERNEL);
|
||||
if (!mac_cb)
|
||||
if (!mac_cb) {
|
||||
fwnode_handle_put(child);
|
||||
return -ENOMEM;
|
||||
}
|
||||
mac_cb->fw_port = child;
|
||||
mac_cb->mac_id = (u8)port_id;
|
||||
dsaf_dev->mac_cb[port_id] = mac_cb;
|
||||
|
@ -75,7 +75,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
|
||||
ret = hclge_comm_cmd_send(hw, &desc, 1);
|
||||
if (ret) {
|
||||
dev_err(&hw->cmq.csq.pdev->dev,
|
||||
"failed to get tqp stat, ret = %d, tx = %u.\n",
|
||||
"failed to get tqp stat, ret = %d, rx = %u.\n",
|
||||
ret, i);
|
||||
return ret;
|
||||
}
|
||||
@ -89,7 +89,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
|
||||
ret = hclge_comm_cmd_send(hw, &desc, 1);
|
||||
if (ret) {
|
||||
dev_err(&hw->cmq.csq.pdev->dev,
|
||||
"failed to get tqp stat, ret = %d, rx = %u.\n",
|
||||
"failed to get tqp stat, ret = %d, tx = %u.\n",
|
||||
ret, i);
|
||||
return ret;
|
||||
}
|
||||
|
@ -562,12 +562,12 @@ static void hns3_dbg_tx_spare_info(struct hns3_enet_ring *ring, char *buf,
|
||||
|
||||
for (i = 0; i < ring_num; i++) {
|
||||
j = 0;
|
||||
sprintf(result[j++], "%8u", i);
|
||||
sprintf(result[j++], "%9u", ring->tx_copybreak);
|
||||
sprintf(result[j++], "%3u", tx_spare->len);
|
||||
sprintf(result[j++], "%3u", tx_spare->next_to_use);
|
||||
sprintf(result[j++], "%3u", tx_spare->next_to_clean);
|
||||
sprintf(result[j++], "%3u", tx_spare->last_to_clean);
|
||||
sprintf(result[j++], "%u", i);
|
||||
sprintf(result[j++], "%u", ring->tx_copybreak);
|
||||
sprintf(result[j++], "%u", tx_spare->len);
|
||||
sprintf(result[j++], "%u", tx_spare->next_to_use);
|
||||
sprintf(result[j++], "%u", tx_spare->next_to_clean);
|
||||
sprintf(result[j++], "%u", tx_spare->last_to_clean);
|
||||
sprintf(result[j++], "%pad", &tx_spare->dma);
|
||||
hns3_dbg_fill_content(content, sizeof(content),
|
||||
tx_spare_info_items,
|
||||
@ -598,35 +598,35 @@ static void hns3_dump_rx_queue_info(struct hns3_enet_ring *ring,
|
||||
u32 base_add_l, base_add_h;
|
||||
u32 j = 0;
|
||||
|
||||
sprintf(result[j++], "%8u", index);
|
||||
sprintf(result[j++], "%u", index);
|
||||
|
||||
sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
|
||||
sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
|
||||
HNS3_RING_RX_RING_BD_NUM_REG));
|
||||
|
||||
sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
|
||||
sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
|
||||
HNS3_RING_RX_RING_BD_LEN_REG));
|
||||
|
||||
sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base +
|
||||
sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
|
||||
HNS3_RING_RX_RING_TAIL_REG));
|
||||
|
||||
sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base +
|
||||
sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
|
||||
HNS3_RING_RX_RING_HEAD_REG));
|
||||
|
||||
sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
|
||||
sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
|
||||
HNS3_RING_RX_RING_FBDNUM_REG));
|
||||
|
||||
sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
|
||||
sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
|
||||
HNS3_RING_RX_RING_PKTNUM_RECORD_REG));
|
||||
sprintf(result[j++], "%9u", ring->rx_copybreak);
|
||||
sprintf(result[j++], "%u", ring->rx_copybreak);
|
||||
|
||||
sprintf(result[j++], "%7s", readl_relaxed(ring->tqp->io_base +
|
||||
sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
|
||||
HNS3_RING_EN_REG) ? "on" : "off");
|
||||
|
||||
if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
|
||||
sprintf(result[j++], "%10s", readl_relaxed(ring->tqp->io_base +
|
||||
sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
|
||||
HNS3_RING_RX_EN_REG) ? "on" : "off");
|
||||
else
|
||||
sprintf(result[j++], "%10s", "NA");
|
||||
sprintf(result[j++], "%s", "NA");
|
||||
|
||||
base_add_h = readl_relaxed(ring->tqp->io_base +
|
||||
HNS3_RING_RX_RING_BASEADDR_H_REG);
|
||||
@ -700,36 +700,36 @@ static void hns3_dump_tx_queue_info(struct hns3_enet_ring *ring,
|
||||
u32 base_add_l, base_add_h;
|
||||
u32 j = 0;
|
||||
|
||||
sprintf(result[j++], "%8u", index);
|
||||
sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
|
||||
sprintf(result[j++], "%u", index);
|
||||
sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
|
||||
HNS3_RING_TX_RING_BD_NUM_REG));
|
||||
|
||||
sprintf(result[j++], "%2u", readl_relaxed(ring->tqp->io_base +
|
||||
sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
|
||||
HNS3_RING_TX_RING_TC_REG));
|
||||
|
||||
sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base +
|
||||
sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
|
||||
HNS3_RING_TX_RING_TAIL_REG));
|
||||
|
||||
sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base +
|
||||
sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
|
||||
HNS3_RING_TX_RING_HEAD_REG));
|
||||
|
||||
sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
|
||||
sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
|
||||
HNS3_RING_TX_RING_FBDNUM_REG));
|
||||
|
||||
sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
|
||||
sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
|
||||
HNS3_RING_TX_RING_OFFSET_REG));
|
||||
|
||||
sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
|
||||
sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
|
||||
HNS3_RING_TX_RING_PKTNUM_RECORD_REG));
|
||||
|
||||
sprintf(result[j++], "%7s", readl_relaxed(ring->tqp->io_base +
|
||||
sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
|
||||
HNS3_RING_EN_REG) ? "on" : "off");
|
||||
|
||||
if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
|
||||
sprintf(result[j++], "%10s", readl_relaxed(ring->tqp->io_base +
|
||||
sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
|
||||
HNS3_RING_TX_EN_REG) ? "on" : "off");
|
||||
else
|
||||
sprintf(result[j++], "%10s", "NA");
|
||||
sprintf(result[j++], "%s", "NA");
|
||||
|
||||
base_add_h = readl_relaxed(ring->tqp->io_base +
|
||||
HNS3_RING_TX_RING_BASEADDR_H_REG);
|
||||
@ -848,15 +848,15 @@ static void hns3_dump_rx_bd_info(struct hns3_nic_priv *priv,
|
||||
{
|
||||
unsigned int j = 0;
|
||||
|
||||
sprintf(result[j++], "%5d", idx);
|
||||
sprintf(result[j++], "%d", idx);
|
||||
sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.l234_info));
|
||||
sprintf(result[j++], "%7u", le16_to_cpu(desc->rx.pkt_len));
|
||||
sprintf(result[j++], "%4u", le16_to_cpu(desc->rx.size));
|
||||
sprintf(result[j++], "%u", le16_to_cpu(desc->rx.pkt_len));
|
||||
sprintf(result[j++], "%u", le16_to_cpu(desc->rx.size));
|
||||
sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.rss_hash));
|
||||
sprintf(result[j++], "%5u", le16_to_cpu(desc->rx.fd_id));
|
||||
sprintf(result[j++], "%8u", le16_to_cpu(desc->rx.vlan_tag));
|
||||
sprintf(result[j++], "%15u", le16_to_cpu(desc->rx.o_dm_vlan_id_fb));
|
||||
sprintf(result[j++], "%11u", le16_to_cpu(desc->rx.ot_vlan_tag));
|
||||
sprintf(result[j++], "%u", le16_to_cpu(desc->rx.fd_id));
|
||||
sprintf(result[j++], "%u", le16_to_cpu(desc->rx.vlan_tag));
|
||||
sprintf(result[j++], "%u", le16_to_cpu(desc->rx.o_dm_vlan_id_fb));
|
||||
sprintf(result[j++], "%u", le16_to_cpu(desc->rx.ot_vlan_tag));
|
||||
sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.bd_base_info));
|
||||
if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) {
|
||||
u32 ol_info = le32_to_cpu(desc->rx.ol_info);
|
||||
@ -930,19 +930,19 @@ static void hns3_dump_tx_bd_info(struct hns3_nic_priv *priv,
|
||||
{
|
||||
unsigned int j = 0;
|
||||
|
||||
sprintf(result[j++], "%6d", idx);
|
||||
sprintf(result[j++], "%d", idx);
|
||||
sprintf(result[j++], "%#llx", le64_to_cpu(desc->addr));
|
||||
sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.vlan_tag));
|
||||
sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.send_size));
|
||||
sprintf(result[j++], "%u", le16_to_cpu(desc->tx.vlan_tag));
|
||||
sprintf(result[j++], "%u", le16_to_cpu(desc->tx.send_size));
|
||||
sprintf(result[j++], "%#x",
|
||||
le32_to_cpu(desc->tx.type_cs_vlan_tso_len));
|
||||
sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.outer_vlan_tag));
|
||||
sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.tv));
|
||||
sprintf(result[j++], "%10u",
|
||||
sprintf(result[j++], "%u", le16_to_cpu(desc->tx.outer_vlan_tag));
|
||||
sprintf(result[j++], "%u", le16_to_cpu(desc->tx.tv));
|
||||
sprintf(result[j++], "%u",
|
||||
le32_to_cpu(desc->tx.ol_type_vlan_len_msec));
|
||||
sprintf(result[j++], "%#x", le32_to_cpu(desc->tx.paylen_ol4cs));
|
||||
sprintf(result[j++], "%#x", le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri));
|
||||
sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.mss_hw_csum));
|
||||
sprintf(result[j++], "%u", le16_to_cpu(desc->tx.mss_hw_csum));
|
||||
}
|
||||
|
||||
static int hns3_dbg_tx_bd_info(struct hns3_dbg_data *d, char *buf, int len)
|
||||
|
@ -5203,6 +5203,13 @@ static void hns3_state_init(struct hnae3_handle *handle)
|
||||
set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state);
|
||||
}
|
||||
|
||||
static void hns3_state_uninit(struct hnae3_handle *handle)
|
||||
{
|
||||
struct hns3_nic_priv *priv = handle->priv;
|
||||
|
||||
clear_bit(HNS3_NIC_STATE_INITED, &priv->state);
|
||||
}
|
||||
|
||||
static int hns3_client_init(struct hnae3_handle *handle)
|
||||
{
|
||||
struct pci_dev *pdev = handle->pdev;
|
||||
@ -5320,7 +5327,9 @@ static int hns3_client_init(struct hnae3_handle *handle)
|
||||
return ret;
|
||||
|
||||
out_reg_netdev_fail:
|
||||
hns3_state_uninit(handle);
|
||||
hns3_dbg_uninit(handle);
|
||||
hns3_client_stop(handle);
|
||||
out_client_start:
|
||||
hns3_free_rx_cpu_rmap(netdev);
|
||||
hns3_nic_uninit_irq(priv);
|
||||
|
@ -94,6 +94,13 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
|
||||
enum hclge_comm_cmd_status status;
|
||||
struct hclge_desc desc;
|
||||
|
||||
if (msg_len > HCLGE_MBX_MAX_MSG_SIZE) {
|
||||
dev_err(&hdev->pdev->dev,
|
||||
"msg data length(=%u) exceeds maximum(=%u)\n",
|
||||
msg_len, HCLGE_MBX_MAX_MSG_SIZE);
|
||||
return -EMSGSIZE;
|
||||
}
|
||||
|
||||
resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
|
||||
|
||||
hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);
|
||||
@ -176,7 +183,7 @@ static int hclge_get_ring_chain_from_mbx(
|
||||
ring_num = req->msg.ring_num;
|
||||
|
||||
if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM)
|
||||
return -ENOMEM;
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i < ring_num; i++) {
|
||||
if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) {
|
||||
@ -587,9 +594,9 @@ static int hclge_set_vf_mtu(struct hclge_vport *vport,
|
||||
return hclge_set_vport_mtu(vport, mtu);
|
||||
}
|
||||
|
||||
static void hclge_get_queue_id_in_pf(struct hclge_vport *vport,
|
||||
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
|
||||
struct hclge_respond_to_vf_msg *resp_msg)
|
||||
static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
|
||||
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
|
||||
struct hclge_respond_to_vf_msg *resp_msg)
|
||||
{
|
||||
struct hnae3_handle *handle = &vport->nic;
|
||||
struct hclge_dev *hdev = vport->back;
|
||||
@ -599,17 +606,18 @@ static void hclge_get_queue_id_in_pf(struct hclge_vport *vport,
|
||||
if (queue_id >= handle->kinfo.num_tqps) {
|
||||
dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n",
|
||||
queue_id, mbx_req->mbx_src_vfid);
|
||||
return;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
|
||||
memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf));
|
||||
resp_msg->len = sizeof(qid_in_pf);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void hclge_get_rss_key(struct hclge_vport *vport,
|
||||
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
|
||||
struct hclge_respond_to_vf_msg *resp_msg)
|
||||
static int hclge_get_rss_key(struct hclge_vport *vport,
|
||||
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
|
||||
struct hclge_respond_to_vf_msg *resp_msg)
|
||||
{
|
||||
#define HCLGE_RSS_MBX_RESP_LEN 8
|
||||
struct hclge_dev *hdev = vport->back;
|
||||
@ -627,13 +635,14 @@ static void hclge_get_rss_key(struct hclge_vport *vport,
|
||||
dev_warn(&hdev->pdev->dev,
|
||||
"failed to get the rss hash key, the index(%u) invalid !\n",
|
||||
index);
|
||||
return;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
memcpy(resp_msg->data,
|
||||
&rss_cfg->rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
|
||||
HCLGE_RSS_MBX_RESP_LEN);
|
||||
resp_msg->len = HCLGE_RSS_MBX_RESP_LEN;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code)
|
||||
@ -809,10 +818,10 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
|
||||
"VF fail(%d) to set mtu\n", ret);
|
||||
break;
|
||||
case HCLGE_MBX_GET_QID_IN_PF:
|
||||
hclge_get_queue_id_in_pf(vport, req, &resp_msg);
|
||||
ret = hclge_get_queue_id_in_pf(vport, req, &resp_msg);
|
||||
break;
|
||||
case HCLGE_MBX_GET_RSS_KEY:
|
||||
hclge_get_rss_key(vport, req, &resp_msg);
|
||||
ret = hclge_get_rss_key(vport, req, &resp_msg);
|
||||
break;
|
||||
case HCLGE_MBX_GET_LINK_MODE:
|
||||
hclge_get_link_mode(vport, req);
|
||||
|
@ -3210,13 +3210,8 @@ static void ibmvnic_get_ringparam(struct net_device *netdev,
|
||||
{
|
||||
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
|
||||
|
||||
if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
|
||||
ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
|
||||
ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
|
||||
} else {
|
||||
ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
|
||||
ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
|
||||
}
|
||||
ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
|
||||
ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
|
||||
ring->rx_mini_max_pending = 0;
|
||||
ring->rx_jumbo_max_pending = 0;
|
||||
ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
|
||||
@ -3231,23 +3226,21 @@ static int ibmvnic_set_ringparam(struct net_device *netdev,
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
|
||||
int ret;
|
||||
|
||||
ret = 0;
|
||||
if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
|
||||
ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
|
||||
netdev_err(netdev, "Invalid request.\n");
|
||||
netdev_err(netdev, "Max tx buffers = %llu\n",
|
||||
adapter->max_rx_add_entries_per_subcrq);
|
||||
netdev_err(netdev, "Max rx buffers = %llu\n",
|
||||
adapter->max_tx_entries_per_subcrq);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
adapter->desired.rx_entries = ring->rx_pending;
|
||||
adapter->desired.tx_entries = ring->tx_pending;
|
||||
|
||||
ret = wait_for_reset(adapter);
|
||||
|
||||
if (!ret &&
|
||||
(adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
|
||||
adapter->req_tx_entries_per_subcrq != ring->tx_pending))
|
||||
netdev_info(netdev,
|
||||
"Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
|
||||
ring->rx_pending, ring->tx_pending,
|
||||
adapter->req_rx_add_entries_per_subcrq,
|
||||
adapter->req_tx_entries_per_subcrq);
|
||||
return ret;
|
||||
return wait_for_reset(adapter);
|
||||
}
|
||||
|
||||
static void ibmvnic_get_channels(struct net_device *netdev,
|
||||
@ -3255,14 +3248,8 @@ static void ibmvnic_get_channels(struct net_device *netdev,
|
||||
{
|
||||
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
|
||||
|
||||
if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
|
||||
channels->max_rx = adapter->max_rx_queues;
|
||||
channels->max_tx = adapter->max_tx_queues;
|
||||
} else {
|
||||
channels->max_rx = IBMVNIC_MAX_QUEUES;
|
||||
channels->max_tx = IBMVNIC_MAX_QUEUES;
|
||||
}
|
||||
|
||||
channels->max_rx = adapter->max_rx_queues;
|
||||
channels->max_tx = adapter->max_tx_queues;
|
||||
channels->max_other = 0;
|
||||
channels->max_combined = 0;
|
||||
channels->rx_count = adapter->req_rx_queues;
|
||||
@ -3275,22 +3262,11 @@ static int ibmvnic_set_channels(struct net_device *netdev,
|
||||
struct ethtool_channels *channels)
|
||||
{
|
||||
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
|
||||
int ret;
|
||||
|
||||
ret = 0;
|
||||
adapter->desired.rx_queues = channels->rx_count;
|
||||
adapter->desired.tx_queues = channels->tx_count;
|
||||
|
||||
ret = wait_for_reset(adapter);
|
||||
|
||||
if (!ret &&
|
||||
(adapter->req_rx_queues != channels->rx_count ||
|
||||
adapter->req_tx_queues != channels->tx_count))
|
||||
netdev_info(netdev,
|
||||
"Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
|
||||
channels->rx_count, channels->tx_count,
|
||||
adapter->req_rx_queues, adapter->req_tx_queues);
|
||||
return ret;
|
||||
return wait_for_reset(adapter);
|
||||
}
|
||||
|
||||
static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
|
||||
@ -3298,43 +3274,32 @@ static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
|
||||
struct ibmvnic_adapter *adapter = netdev_priv(dev);
|
||||
int i;
|
||||
|
||||
switch (stringset) {
|
||||
case ETH_SS_STATS:
|
||||
for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
|
||||
i++, data += ETH_GSTRING_LEN)
|
||||
memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
|
||||
|
||||
for (i = 0; i < adapter->req_tx_queues; i++) {
|
||||
snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
|
||||
data += ETH_GSTRING_LEN;
|
||||
|
||||
snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
|
||||
data += ETH_GSTRING_LEN;
|
||||
|
||||
snprintf(data, ETH_GSTRING_LEN,
|
||||
"tx%d_dropped_packets", i);
|
||||
data += ETH_GSTRING_LEN;
|
||||
}
|
||||
|
||||
for (i = 0; i < adapter->req_rx_queues; i++) {
|
||||
snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
|
||||
data += ETH_GSTRING_LEN;
|
||||
|
||||
snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
|
||||
data += ETH_GSTRING_LEN;
|
||||
|
||||
snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
|
||||
data += ETH_GSTRING_LEN;
|
||||
}
|
||||
break;
|
||||
|
||||
case ETH_SS_PRIV_FLAGS:
|
||||
for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
|
||||
strcpy(data + i * ETH_GSTRING_LEN,
|
||||
ibmvnic_priv_flags[i]);
|
||||
break;
|
||||
default:
|
||||
if (stringset != ETH_SS_STATS)
|
||||
return;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
|
||||
memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
|
||||
|
||||
for (i = 0; i < adapter->req_tx_queues; i++) {
|
||||
snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
|
||||
data += ETH_GSTRING_LEN;
|
||||
|
||||
snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
|
||||
data += ETH_GSTRING_LEN;
|
||||
|
||||
snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
|
||||
data += ETH_GSTRING_LEN;
|
||||
}
|
||||
|
||||
for (i = 0; i < adapter->req_rx_queues; i++) {
|
||||
snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
|
||||
data += ETH_GSTRING_LEN;
|
||||
|
||||
snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
|
||||
data += ETH_GSTRING_LEN;
|
||||
|
||||
snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
|
||||
data += ETH_GSTRING_LEN;
|
||||
}
|
||||
}
|
||||
|
||||
@ -3347,8 +3312,6 @@ static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
|
||||
return ARRAY_SIZE(ibmvnic_stats) +
|
||||
adapter->req_tx_queues * NUM_TX_STATS +
|
||||
adapter->req_rx_queues * NUM_RX_STATS;
|
||||
case ETH_SS_PRIV_FLAGS:
|
||||
return ARRAY_SIZE(ibmvnic_priv_flags);
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
@ -3401,26 +3364,6 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
|
||||
}
|
||||
}
|
||||
|
||||
static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
|
||||
{
|
||||
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
|
||||
|
||||
return adapter->priv_flags;
|
||||
}
|
||||
|
||||
static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
|
||||
{
|
||||
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
|
||||
bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
|
||||
|
||||
if (which_maxes)
|
||||
adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
|
||||
else
|
||||
adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct ethtool_ops ibmvnic_ethtool_ops = {
|
||||
.get_drvinfo = ibmvnic_get_drvinfo,
|
||||
.get_msglevel = ibmvnic_get_msglevel,
|
||||
@ -3434,8 +3377,6 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = {
|
||||
.get_sset_count = ibmvnic_get_sset_count,
|
||||
.get_ethtool_stats = ibmvnic_get_ethtool_stats,
|
||||
.get_link_ksettings = ibmvnic_get_link_ksettings,
|
||||
.get_priv_flags = ibmvnic_get_priv_flags,
|
||||
.set_priv_flags = ibmvnic_set_priv_flags,
|
||||
};
|
||||
|
||||
/* Routines for managing CRQs/sCRQs */
|
||||
|
@ -41,11 +41,6 @@
|
||||
|
||||
#define IBMVNIC_RESET_DELAY 100
|
||||
|
||||
static const char ibmvnic_priv_flags[][ETH_GSTRING_LEN] = {
|
||||
#define IBMVNIC_USE_SERVER_MAXES 0x1
|
||||
"use-server-maxes"
|
||||
};
|
||||
|
||||
struct ibmvnic_login_buffer {
|
||||
__be32 len;
|
||||
__be32 version;
|
||||
@ -883,7 +878,6 @@ struct ibmvnic_adapter {
|
||||
struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
|
||||
dma_addr_t ip_offload_ctrl_tok;
|
||||
u32 msg_enable;
|
||||
u32 priv_flags;
|
||||
|
||||
/* Vital Product Data (VPD) */
|
||||
struct ibmvnic_vpd *vpd;
|
||||
|
@ -6929,12 +6929,15 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
|
||||
|
||||
dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
|
||||
|
||||
#define ICE_EMP_RESET_SLEEP_MS 5000
|
||||
if (reset_type == ICE_RESET_EMPR) {
|
||||
/* If an EMP reset has occurred, any previously pending flash
|
||||
* update will have completed. We no longer know whether or
|
||||
* not the NVM update EMP reset is restricted.
|
||||
*/
|
||||
pf->fw_emp_reset_disabled = false;
|
||||
|
||||
msleep(ICE_EMP_RESET_SLEEP_MS);
|
||||
}
|
||||
|
||||
err = ice_init_all_ctrlq(hw);
|
||||
|
@ -1046,8 +1046,8 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
|
||||
|
||||
if (!num_vfs) {
|
||||
if (!pci_vfs_assigned(pdev)) {
|
||||
ice_mbx_deinit_snapshot(&pf->hw);
|
||||
ice_free_vfs(pf);
|
||||
ice_mbx_deinit_snapshot(&pf->hw);
|
||||
if (pf->lag)
|
||||
ice_enable_lag(pf->lag);
|
||||
return 0;
|
||||
|
@ -3625,6 +3625,8 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
|
||||
return;
|
||||
}
|
||||
|
||||
mutex_lock(&vf->cfg_lock);
|
||||
|
||||
/* Check if VF is disabled. */
|
||||
if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
|
||||
err = -EPERM;
|
||||
@ -3642,32 +3644,20 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
|
||||
err = -EINVAL;
|
||||
}
|
||||
|
||||
if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
|
||||
ice_vc_send_msg_to_vf(vf, v_opcode,
|
||||
VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
|
||||
0);
|
||||
ice_put_vf(vf);
|
||||
return;
|
||||
}
|
||||
|
||||
error_handler:
|
||||
if (err) {
|
||||
ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
|
||||
NULL, 0);
|
||||
dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
|
||||
vf_id, v_opcode, msglen, err);
|
||||
ice_put_vf(vf);
|
||||
return;
|
||||
goto finish;
|
||||
}
|
||||
|
||||
/* VF is being configured in another context that triggers a VFR, so no
|
||||
* need to process this message
|
||||
*/
|
||||
if (!mutex_trylock(&vf->cfg_lock)) {
|
||||
dev_info(dev, "VF %u is being configured in another context that will trigger a VFR, so there is no need to handle this message\n",
|
||||
vf->vf_id);
|
||||
ice_put_vf(vf);
|
||||
return;
|
||||
if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
|
||||
ice_vc_send_msg_to_vf(vf, v_opcode,
|
||||
VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
|
||||
0);
|
||||
goto finish;
|
||||
}
|
||||
|
||||
switch (v_opcode) {
|
||||
@ -3780,6 +3770,7 @@ error_handler:
|
||||
vf_id, v_opcode, err);
|
||||
}
|
||||
|
||||
finish:
|
||||
mutex_unlock(&vf->cfg_lock);
|
||||
ice_put_vf(vf);
|
||||
}
|
||||
|
@ -903,7 +903,8 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
|
||||
/* Tx IPsec offload doesn't seem to work on this
|
||||
* device, so block these requests for now.
|
||||
*/
|
||||
if (!(sam->flags & XFRM_OFFLOAD_INBOUND)) {
|
||||
sam->flags = sam->flags & ~XFRM_OFFLOAD_IPV6;
|
||||
if (sam->flags != XFRM_OFFLOAD_INBOUND) {
|
||||
err = -EOPNOTSUPP;
|
||||
goto err_out;
|
||||
}
|
||||
|
@ -346,7 +346,7 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
|
||||
|
||||
lan966x_mac_process_raw_entry(&raw_entries[column],
|
||||
mac, &vid, &dest_idx);
|
||||
if (WARN_ON(dest_idx > lan966x->num_phys_ports))
|
||||
if (WARN_ON(dest_idx >= lan966x->num_phys_ports))
|
||||
continue;
|
||||
|
||||
/* If the entry in SW is found, then there is nothing
|
||||
@ -393,7 +393,7 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
|
||||
|
||||
lan966x_mac_process_raw_entry(&raw_entries[column],
|
||||
mac, &vid, &dest_idx);
|
||||
if (WARN_ON(dest_idx > lan966x->num_phys_ports))
|
||||
if (WARN_ON(dest_idx >= lan966x->num_phys_ports))
|
||||
continue;
|
||||
|
||||
mac_entry = lan966x_mac_alloc_entry(mac, vid, dest_idx);
|
||||
|
@ -551,7 +551,7 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
|
||||
struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1];
|
||||
struct ocelot_port *ocelot_port = ocelot->ports[port];
|
||||
struct ocelot_vcap_filter *filter;
|
||||
int err;
|
||||
int err = 0;
|
||||
u32 val;
|
||||
|
||||
list_for_each_entry(filter, &block->rules, list) {
|
||||
@ -570,7 +570,7 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
|
||||
if (vlan_aware)
|
||||
err = ocelot_del_vlan_unaware_pvid(ocelot, port,
|
||||
ocelot_port->bridge);
|
||||
else
|
||||
else if (ocelot_port->bridge)
|
||||
err = ocelot_add_vlan_unaware_pvid(ocelot, port,
|
||||
ocelot_port->bridge);
|
||||
if (err)
|
||||
@ -629,6 +629,13 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
|
||||
{
|
||||
int err;
|
||||
|
||||
/* Ignore VID 0 added to our RX filter by the 8021q module, since
|
||||
* that collides with OCELOT_STANDALONE_PVID and changes it from
|
||||
* egress-untagged to egress-tagged.
|
||||
*/
|
||||
if (!vid)
|
||||
return 0;
|
||||
|
||||
err = ocelot_vlan_member_add(ocelot, port, vid, untagged);
|
||||
if (err)
|
||||
return err;
|
||||
@ -651,6 +658,9 @@ int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
|
||||
bool del_pvid = false;
|
||||
int err;
|
||||
|
||||
if (!vid)
|
||||
return 0;
|
||||
|
||||
if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid)
|
||||
del_pvid = true;
|
||||
|
||||
|
@ -65,8 +65,9 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
|
||||
struct phy_device *phy_dev = ndev->phydev;
|
||||
u32 val;
|
||||
|
||||
writew(SGMII_ADAPTER_DISABLE,
|
||||
sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
|
||||
if (sgmii_adapter_base)
|
||||
writew(SGMII_ADAPTER_DISABLE,
|
||||
sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
|
||||
|
||||
if (splitter_base) {
|
||||
val = readl(splitter_base + EMAC_SPLITTER_CTRL_REG);
|
||||
@ -88,10 +89,11 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
|
||||
writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG);
|
||||
}
|
||||
|
||||
writew(SGMII_ADAPTER_ENABLE,
|
||||
sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
|
||||
if (phy_dev)
|
||||
if (phy_dev && sgmii_adapter_base) {
|
||||
writew(SGMII_ADAPTER_ENABLE,
|
||||
sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
|
||||
tse_pcs_fix_mac_speed(&dwmac->pcs, phy_dev, speed);
|
||||
}
|
||||
}
|
||||
|
||||
static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *dev)
|
||||
|
@ -880,7 +880,7 @@ static int mv3310_read_status_copper(struct phy_device *phydev)
|
||||
|
||||
cssr1 = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_CSSR1);
|
||||
if (cssr1 < 0)
|
||||
return val;
|
||||
return cssr1;
|
||||
|
||||
/* If the link settings are not resolved, mark the link down */
|
||||
if (!(cssr1 & MV_PCS_CSSR1_RESOLVED)) {
|
||||
|
@ -1005,6 +1005,24 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
|
||||
* xdp.data_meta were adjusted
|
||||
*/
|
||||
len = xdp.data_end - xdp.data + vi->hdr_len + metasize;
|
||||
|
||||
/* recalculate headroom if xdp.data or xdp_data_meta
|
||||
* were adjusted, note that offset should always point
|
||||
* to the start of the reserved bytes for virtio_net
|
||||
* header which are followed by xdp.data, that means
|
||||
* that offset is equal to the headroom (when buf is
|
||||
* starting at the beginning of the page, otherwise
|
||||
* there is a base offset inside the page) but it's used
|
||||
* with a different starting point (buf start) than
|
||||
* xdp.data (buf start + vnet hdr size). If xdp.data or
|
||||
* data_meta were adjusted by the xdp prog then the
|
||||
* headroom size has changed and so has the offset, we
|
||||
* can use data_hard_start, which points at buf start +
|
||||
* vnet hdr size, to calculate the new headroom and use
|
||||
* it later to compute buf start in page_to_skb()
|
||||
*/
|
||||
headroom = xdp.data - xdp.data_hard_start - metasize;
|
||||
|
||||
/* We can only create skb based on xdp_page. */
|
||||
if (unlikely(xdp_page != page)) {
|
||||
rcu_read_unlock();
|
||||
@ -1012,7 +1030,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
|
||||
head_skb = page_to_skb(vi, rq, xdp_page, offset,
|
||||
len, PAGE_SIZE, false,
|
||||
metasize,
|
||||
VIRTIO_XDP_HEADROOM);
|
||||
headroom);
|
||||
return head_skb;
|
||||
}
|
||||
break;
|
||||
|
@ -349,7 +349,7 @@ static int __init cosa_init(void)
|
||||
}
|
||||
} else {
|
||||
cosa_major = register_chrdev(0, "cosa", &cosa_fops);
|
||||
if (!cosa_major) {
|
||||
if (cosa_major < 0) {
|
||||
pr_warn("unable to register chardev\n");
|
||||
err = -EIO;
|
||||
goto out;
|
||||
|
@ -19,6 +19,7 @@
|
||||
#include <linux/if_arp.h>
|
||||
#include <linux/icmp.h>
|
||||
#include <linux/suspend.h>
|
||||
#include <net/dst_metadata.h>
|
||||
#include <net/icmp.h>
|
||||
#include <net/rtnetlink.h>
|
||||
#include <net/ip_tunnels.h>
|
||||
@ -167,7 +168,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
goto err_peer;
|
||||
}
|
||||
|
||||
mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
|
||||
mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
|
||||
|
||||
__skb_queue_head_init(&packets);
|
||||
if (!skb_is_gso(skb)) {
|
||||
|
@ -199,10 +199,10 @@ struct net_device_stats {
|
||||
* Try to fit them in a single cache line, for dev_get_stats() sake.
|
||||
*/
|
||||
struct net_device_core_stats {
|
||||
local_t rx_dropped;
|
||||
local_t tx_dropped;
|
||||
local_t rx_nohandler;
|
||||
} __aligned(4 * sizeof(local_t));
|
||||
unsigned long rx_dropped;
|
||||
unsigned long tx_dropped;
|
||||
unsigned long rx_nohandler;
|
||||
} __aligned(4 * sizeof(unsigned long));
|
||||
|
||||
#include <linux/cache.h>
|
||||
#include <linux/skbuff.h>
|
||||
@ -3843,15 +3843,15 @@ static __always_inline bool __is_skb_forwardable(const struct net_device *dev,
|
||||
return false;
|
||||
}
|
||||
|
||||
struct net_device_core_stats *netdev_core_stats_alloc(struct net_device *dev);
|
||||
struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev);
|
||||
|
||||
static inline struct net_device_core_stats *dev_core_stats(struct net_device *dev)
|
||||
static inline struct net_device_core_stats __percpu *dev_core_stats(struct net_device *dev)
|
||||
{
|
||||
/* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
|
||||
struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats);
|
||||
|
||||
if (likely(p))
|
||||
return this_cpu_ptr(p);
|
||||
return p;
|
||||
|
||||
return netdev_core_stats_alloc(dev);
|
||||
}
|
||||
@ -3859,14 +3859,11 @@ static inline struct net_device_core_stats *dev_core_stats(struct net_device *de
|
||||
#define DEV_CORE_STATS_INC(FIELD) \
|
||||
static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev) \
|
||||
{ \
|
||||
struct net_device_core_stats *p; \
|
||||
struct net_device_core_stats __percpu *p; \
|
||||
\
|
||||
preempt_disable(); \
|
||||
p = dev_core_stats(dev); \
|
||||
\
|
||||
if (p) \
|
||||
local_inc(&p->FIELD); \
|
||||
preempt_enable(); \
|
||||
this_cpu_inc(p->FIELD); \
|
||||
}
|
||||
DEV_CORE_STATS_INC(rx_dropped)
|
||||
DEV_CORE_STATS_INC(tx_dropped)
|
||||
|
@ -578,6 +578,7 @@ enum {
|
||||
#define HCI_ERROR_CONNECTION_TIMEOUT 0x08
|
||||
#define HCI_ERROR_REJ_LIMITED_RESOURCES 0x0d
|
||||
#define HCI_ERROR_REJ_BAD_ADDR 0x0f
|
||||
#define HCI_ERROR_INVALID_PARAMETERS 0x12
|
||||
#define HCI_ERROR_REMOTE_USER_TERM 0x13
|
||||
#define HCI_ERROR_REMOTE_LOW_RESOURCES 0x14
|
||||
#define HCI_ERROR_REMOTE_POWER_OFF 0x15
|
||||
|
@ -1156,7 +1156,7 @@ int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
|
||||
|
||||
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);
|
||||
|
||||
void hci_le_conn_failed(struct hci_conn *conn, u8 status);
|
||||
void hci_conn_failed(struct hci_conn *conn, u8 status);
|
||||
|
||||
/*
|
||||
* hci_conn_get() and hci_conn_put() are used to control the life-time of an
|
||||
|
@ -58,7 +58,7 @@ struct ip6_tnl {
|
||||
|
||||
/* These fields used only by GRE */
|
||||
__u32 i_seqno; /* The last seen seqno */
|
||||
__u32 o_seqno; /* The last output seqno */
|
||||
atomic_t o_seqno; /* The last output seqno */
|
||||
int hlen; /* tun_hlen + encap_hlen */
|
||||
int tun_hlen; /* Precalculated header length */
|
||||
int encap_hlen; /* Encap header length (FOU,GUE) */
|
||||
|
@ -116,7 +116,7 @@ struct ip_tunnel {
|
||||
|
||||
/* These four fields used only by GRE */
|
||||
u32 i_seqno; /* The last seen seqno */
|
||||
u32 o_seqno; /* The last output seqno */
|
||||
atomic_t o_seqno; /* The last output seqno */
|
||||
int tun_hlen; /* Precalculated header length */
|
||||
|
||||
/* These four fields used only by ERSPAN */
|
||||
|
@ -480,6 +480,7 @@ int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
|
||||
u32 cookie);
|
||||
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
|
||||
struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
|
||||
const struct tcp_request_sock_ops *af_ops,
|
||||
struct sock *sk, struct sk_buff *skb);
|
||||
#ifdef CONFIG_SYN_COOKIES
|
||||
|
||||
@ -620,6 +621,7 @@ void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
|
||||
void tcp_reset(struct sock *sk, struct sk_buff *skb);
|
||||
void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
|
||||
void tcp_fin(struct sock *sk);
|
||||
void tcp_check_space(struct sock *sk);
|
||||
|
||||
/* tcp_timer.c */
|
||||
void tcp_init_xmit_timers(struct sock *);
|
||||
@ -1042,6 +1044,7 @@ struct rate_sample {
|
||||
int losses; /* number of packets marked lost upon ACK */
|
||||
u32 acked_sacked; /* number of packets newly (S)ACKed upon ACK */
|
||||
u32 prior_in_flight; /* in flight before this ACK */
|
||||
u32 last_end_seq; /* end_seq of most recently ACKed packet */
|
||||
bool is_app_limited; /* is sample from packet with bubble in pipe? */
|
||||
bool is_retrans; /* is sample from retransmission? */
|
||||
bool is_ack_delayed; /* is this (likely) a delayed ACK? */
|
||||
@ -1164,6 +1167,11 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
|
||||
bool is_sack_reneg, struct rate_sample *rs);
|
||||
void tcp_rate_check_app_limited(struct sock *sk);
|
||||
|
||||
static inline bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
|
||||
{
|
||||
return t1 > t2 || (t1 == t2 && after(seq1, seq2));
|
||||
}
|
||||
|
||||
/* These functions determine how the current flow behaves in respect of SACK
|
||||
* handling. SACK is negotiated with the peer, and therefore it can vary
|
||||
* between different flows.
|
||||
|
@ -97,6 +97,7 @@ int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
|
||||
u16 queue_id, u16 flags);
|
||||
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
|
||||
struct net_device *dev, u16 queue_id);
|
||||
int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
|
||||
void xp_destroy(struct xsk_buff_pool *pool);
|
||||
void xp_get_pool(struct xsk_buff_pool *pool);
|
||||
bool xp_put_pool(struct xsk_buff_pool *pool);
|
||||
|
@ -2126,7 +2126,7 @@ static void kretprobe_rethook_handler(struct rethook_node *rh, void *data,
|
||||
struct kprobe_ctlblk *kcb;
|
||||
|
||||
/* The data must NOT be null. This means rethook data structure is broken. */
|
||||
if (WARN_ON_ONCE(!data))
|
||||
if (WARN_ON_ONCE(!data) || !rp->handler)
|
||||
return;
|
||||
|
||||
__this_cpu_write(current_kprobe, &rp->kp);
|
||||
|
@ -670,7 +670,7 @@ static void le_conn_timeout(struct work_struct *work)
|
||||
/* Disable LE Advertising */
|
||||
le_disable_advertising(hdev);
|
||||
hci_dev_lock(hdev);
|
||||
hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
|
||||
hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
|
||||
hci_dev_unlock(hdev);
|
||||
return;
|
||||
}
|
||||
@ -873,7 +873,7 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
|
||||
EXPORT_SYMBOL(hci_get_route);
|
||||
|
||||
/* This function requires the caller holds hdev->lock */
|
||||
void hci_le_conn_failed(struct hci_conn *conn, u8 status)
|
||||
static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
|
||||
{
|
||||
struct hci_dev *hdev = conn->hdev;
|
||||
struct hci_conn_params *params;
|
||||
@ -886,8 +886,6 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
|
||||
params->conn = NULL;
|
||||
}
|
||||
|
||||
conn->state = BT_CLOSED;
|
||||
|
||||
/* If the status indicates successful cancellation of
|
||||
* the attempt (i.e. Unknown Connection Id) there's no point of
|
||||
* notifying failure since we'll go back to keep trying to
|
||||
@ -899,10 +897,6 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
|
||||
mgmt_connect_failed(hdev, &conn->dst, conn->type,
|
||||
conn->dst_type, status);
|
||||
|
||||
hci_connect_cfm(conn, status);
|
||||
|
||||
hci_conn_del(conn);
|
||||
|
||||
/* Since we may have temporarily stopped the background scanning in
|
||||
* favor of connection establishment, we should restart it.
|
||||
*/
|
||||
@ -914,6 +908,28 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
|
||||
hci_enable_advertising(hdev);
|
||||
}
|
||||
|
||||
/* This function requires the caller holds hdev->lock */
|
||||
void hci_conn_failed(struct hci_conn *conn, u8 status)
|
||||
{
|
||||
struct hci_dev *hdev = conn->hdev;
|
||||
|
||||
bt_dev_dbg(hdev, "status 0x%2.2x", status);
|
||||
|
||||
switch (conn->type) {
|
||||
case LE_LINK:
|
||||
hci_le_conn_failed(conn, status);
|
||||
break;
|
||||
case ACL_LINK:
|
||||
mgmt_connect_failed(hdev, &conn->dst, conn->type,
|
||||
conn->dst_type, status);
|
||||
break;
|
||||
}
|
||||
|
||||
conn->state = BT_CLOSED;
|
||||
hci_connect_cfm(conn, status);
|
||||
hci_conn_del(conn);
|
||||
}
|
||||
|
||||
static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
|
||||
{
|
||||
struct hci_conn *conn = data;
|
||||
|
@ -2834,7 +2834,7 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
|
||||
bt_dev_dbg(hdev, "status 0x%2.2x", status);
|
||||
|
||||
/* All connection failure handling is taken care of by the
|
||||
* hci_le_conn_failed function which is triggered by the HCI
|
||||
* hci_conn_failed function which is triggered by the HCI
|
||||
* request completion callbacks used for connecting.
|
||||
*/
|
||||
if (status)
|
||||
@ -2859,7 +2859,7 @@ static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
|
||||
bt_dev_dbg(hdev, "status 0x%2.2x", status);
|
||||
|
||||
/* All connection failure handling is taken care of by the
|
||||
* hci_le_conn_failed function which is triggered by the HCI
|
||||
* hci_conn_failed function which is triggered by the HCI
|
||||
* request completion callbacks used for connecting.
|
||||
*/
|
||||
if (status)
|
||||
@ -3067,18 +3067,20 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
|
||||
{
|
||||
struct hci_ev_conn_complete *ev = data;
|
||||
struct hci_conn *conn;
|
||||
u8 status = ev->status;
|
||||
|
||||
if (__le16_to_cpu(ev->handle) > HCI_CONN_HANDLE_MAX) {
|
||||
bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for invalid handle");
|
||||
return;
|
||||
}
|
||||
|
||||
bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
|
||||
bt_dev_dbg(hdev, "status 0x%2.2x", status);
|
||||
|
||||
hci_dev_lock(hdev);
|
||||
|
||||
conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
|
||||
if (!conn) {
|
||||
/* In case of error status and there is no connection pending
|
||||
* just unlock as there is nothing to cleanup.
|
||||
*/
|
||||
if (ev->status)
|
||||
goto unlock;
|
||||
|
||||
/* Connection may not exist if auto-connected. Check the bredr
|
||||
* allowlist to see if this device is allowed to auto connect.
|
||||
* If link is an ACL type, create a connection class
|
||||
@ -3122,8 +3124,14 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
if (!ev->status) {
|
||||
if (!status) {
|
||||
conn->handle = __le16_to_cpu(ev->handle);
|
||||
if (conn->handle > HCI_CONN_HANDLE_MAX) {
|
||||
bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
|
||||
conn->handle, HCI_CONN_HANDLE_MAX);
|
||||
status = HCI_ERROR_INVALID_PARAMETERS;
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (conn->type == ACL_LINK) {
|
||||
conn->state = BT_CONFIG;
|
||||
@ -3164,19 +3172,14 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
|
||||
hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
|
||||
&cp);
|
||||
}
|
||||
} else {
|
||||
conn->state = BT_CLOSED;
|
||||
if (conn->type == ACL_LINK)
|
||||
mgmt_connect_failed(hdev, &conn->dst, conn->type,
|
||||
conn->dst_type, ev->status);
|
||||
}
|
||||
|
||||
if (conn->type == ACL_LINK)
|
||||
hci_sco_setup(conn, ev->status);
|
||||
|
||||
if (ev->status) {
|
||||
hci_connect_cfm(conn, ev->status);
|
||||
hci_conn_del(conn);
|
||||
done:
|
||||
if (status) {
|
||||
hci_conn_failed(conn, status);
|
||||
} else if (ev->link_type == SCO_LINK) {
|
||||
switch (conn->setting & SCO_AIRMODE_MASK) {
|
||||
case SCO_AIRMODE_CVSD:
|
||||
@ -3185,7 +3188,7 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
|
||||
break;
|
||||
}
|
||||
|
||||
hci_connect_cfm(conn, ev->status);
|
||||
hci_connect_cfm(conn, status);
|
||||
}
|
||||
|
||||
unlock:
|
||||
@@ -4676,6 +4679,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
 {
     struct hci_ev_sync_conn_complete *ev = data;
     struct hci_conn *conn;
+    u8 status = ev->status;

     switch (ev->link_type) {
     case SCO_LINK:
@@ -4690,12 +4694,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
         return;
     }

-    if (__le16_to_cpu(ev->handle) > HCI_CONN_HANDLE_MAX) {
-        bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete for invalid handle");
-        return;
-    }
-
-    bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
+    bt_dev_dbg(hdev, "status 0x%2.2x", status);

     hci_dev_lock(hdev);

@@ -4729,9 +4728,17 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
         goto unlock;
     }

-    switch (ev->status) {
+    switch (status) {
     case 0x00:
         conn->handle = __le16_to_cpu(ev->handle);
+        if (conn->handle > HCI_CONN_HANDLE_MAX) {
+            bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
+                       conn->handle, HCI_CONN_HANDLE_MAX);
+            status = HCI_ERROR_INVALID_PARAMETERS;
+            conn->state = BT_CLOSED;
+            break;
+        }
+
         conn->state  = BT_CONNECTED;
         conn->type   = ev->link_type;

@@ -4775,8 +4782,8 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
         }
     }

-    hci_connect_cfm(conn, ev->status);
-    if (ev->status)
+    hci_connect_cfm(conn, status);
+    if (status)
         hci_conn_del(conn);

 unlock:
@@ -5527,11 +5534,6 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
     struct smp_irk *irk;
     u8 addr_type;

-    if (handle > HCI_CONN_HANDLE_MAX) {
-        bt_dev_err(hdev, "Ignoring HCI_LE_Connection_Complete for invalid handle");
-        return;
-    }
-
     hci_dev_lock(hdev);

     /* All controllers implicitly stop advertising in the event of a
@@ -5541,6 +5543,12 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,

     conn = hci_lookup_le_connect(hdev);
     if (!conn) {
+        /* In case of error status and there is no connection pending
+         * just unlock as there is nothing to cleanup.
+         */
+        if (status)
+            goto unlock;
+
         conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
         if (!conn) {
             bt_dev_err(hdev, "no memory for new connection");
@@ -5603,8 +5611,14 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,

     conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);

+    if (handle > HCI_CONN_HANDLE_MAX) {
+        bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle,
+                   HCI_CONN_HANDLE_MAX);
+        status = HCI_ERROR_INVALID_PARAMETERS;
+    }
+
     if (status) {
-        hci_le_conn_failed(conn, status);
+        hci_conn_failed(conn, status);
         goto unlock;
     }
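The three hci_event.c hunks above share one pattern: a connection-complete event with an out-of-range handle is no longer dropped early with a bare return; instead the over-range handle is folded into the status as HCI_ERROR_INVALID_PARAMETERS so the common failure path (hci_conn_failed()) still runs and the hci_conn object gets cleaned up. A minimal sketch of that decision, purely illustrative — the _example names and constants are stand-ins, not kernel identifiers:

#define HCI_CONN_HANDLE_MAX_EXAMPLE            0x0eff  /* assumed limit */
#define HCI_ERROR_INVALID_PARAMETERS_EXAMPLE   0x12    /* assumed error code */

static u8 conn_complete_status_example(u16 handle, u8 controller_status)
{
    /* an error reported by the controller always wins */
    if (controller_status)
        return controller_status;

    /* a "successful" event with a bogus handle is turned into a
     * parameter error so the cleanup path runs instead of leaking state
     */
    if (handle > HCI_CONN_HANDLE_MAX_EXAMPLE)
        return HCI_ERROR_INVALID_PARAMETERS_EXAMPLE;

    return 0x00; /* genuinely successful */
}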
net/bluetooth/hci_sync.c:

@@ -4408,12 +4408,21 @@ static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
 static int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
                                u8 reason)
 {
+    int err;
+
     switch (conn->state) {
     case BT_CONNECTED:
     case BT_CONFIG:
         return hci_disconnect_sync(hdev, conn, reason);
     case BT_CONNECT:
-        return hci_connect_cancel_sync(hdev, conn);
+        err = hci_connect_cancel_sync(hdev, conn);
+        /* Cleanup hci_conn object if it cannot be cancelled as it
+         * likelly means the controller and host stack are out of sync.
+         */
+        if (err)
+            hci_conn_failed(conn, err);
+
+        return err;
     case BT_CONNECT2:
         return hci_reject_conn_sync(hdev, conn, reason);
     default:
net/bpf/test_run.c:

@@ -108,6 +108,7 @@ struct xdp_test_data {
     struct page_pool *pp;
     struct xdp_frame **frames;
     struct sk_buff **skbs;
+    struct xdp_mem_info mem;
     u32 batch_size;
     u32 frame_cnt;
 };
@@ -147,7 +148,6 @@ static void xdp_test_run_init_page(struct page *page, void *arg)

 static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
 {
-    struct xdp_mem_info mem = {};
     struct page_pool *pp;
     int err = -ENOMEM;
     struct page_pool_params pp_params = {
@@ -174,7 +174,7 @@ static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx
     }

     /* will copy 'mem.id' into pp->xdp_mem_id */
-    err = xdp_reg_mem_model(&mem, MEM_TYPE_PAGE_POOL, pp);
+    err = xdp_reg_mem_model(&xdp->mem, MEM_TYPE_PAGE_POOL, pp);
     if (err)
         goto err_mmodel;

@@ -202,6 +202,7 @@ err_skbs:

 static void xdp_test_run_teardown(struct xdp_test_data *xdp)
 {
+    xdp_unreg_mem_model(&xdp->mem);
     page_pool_destroy(xdp->pp);
     kfree(xdp->frames);
     kfree(xdp->skbs);
net/bridge/br_switchdev.c:

@@ -353,6 +353,8 @@ static int br_switchdev_vlan_attr_replay(struct net_device *br_dev,
     attr.orig_dev = br_dev;

     vg = br_vlan_group(br);
+    if (!vg)
+        return 0;

     list_for_each_entry(v, &vg->vlan_list, vlist) {
         if (v->msti) {
net/core/dev.c:

@@ -10304,7 +10304,7 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
 }
 EXPORT_SYMBOL(netdev_stats_to_stats64);

-struct net_device_core_stats *netdev_core_stats_alloc(struct net_device *dev)
+struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev)
 {
     struct net_device_core_stats __percpu *p;

@@ -10315,11 +10315,7 @@ struct net_device_core_stats *netdev_core_stats_alloc(struct net_device *dev)
         free_percpu(p);

     /* This READ_ONCE() pairs with the cmpxchg() above */
-    p = READ_ONCE(dev->core_stats);
-    if (!p)
-        return NULL;
-
-    return this_cpu_ptr(p);
+    return READ_ONCE(dev->core_stats);
 }
 EXPORT_SYMBOL(netdev_core_stats_alloc);

@@ -10356,9 +10352,9 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,

         for_each_possible_cpu(i) {
             core_stats = per_cpu_ptr(p, i);
-            storage->rx_dropped += local_read(&core_stats->rx_dropped);
-            storage->tx_dropped += local_read(&core_stats->tx_dropped);
-            storage->rx_nohandler += local_read(&core_stats->rx_nohandler);
+            storage->rx_dropped += READ_ONCE(core_stats->rx_dropped);
+            storage->tx_dropped += READ_ONCE(core_stats->tx_dropped);
+            storage->rx_nohandler += READ_ONCE(core_stats->rx_nohandler);
         }
     }
     return storage;
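These dev.c hunks move net->core_stats to the plain per-CPU counter pattern the merge log calls "use this_cpu_inc() to increment net->core_stats": the allocator hands back the raw __percpu pointer, writers bump their own CPU's slot with this_cpu_inc() (safe under preempt-rt), and the reader folds each slot with READ_ONCE(). A small kernel-style sketch of that pattern, not taken from the tree:

struct core_stats_example {
    unsigned long rx_dropped;
    unsigned long tx_dropped;
};

/* writer side: one increment on the local CPU's slot, no lock needed */
static inline void rx_dropped_inc_example(struct core_stats_example __percpu *p)
{
    this_cpu_inc(p->rx_dropped);
}

/* reader side: fold every CPU's slot into one total */
static unsigned long rx_dropped_total_example(struct core_stats_example __percpu *p)
{
    unsigned long sum = 0;
    int cpu;

    for_each_possible_cpu(cpu)
        sum += READ_ONCE(per_cpu_ptr(p, cpu)->rx_dropped);

    return sum;
}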
net/core/lwt_bpf.c:

@@ -159,10 +159,8 @@ static int bpf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
     return dst->lwtstate->orig_output(net, sk, skb);
 }

-static int xmit_check_hhlen(struct sk_buff *skb)
+static int xmit_check_hhlen(struct sk_buff *skb, int hh_len)
 {
-    int hh_len = skb_dst(skb)->dev->hard_header_len;
-
     if (skb_headroom(skb) < hh_len) {
         int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

@@ -274,6 +272,7 @@ static int bpf_xmit(struct sk_buff *skb)

     bpf = bpf_lwt_lwtunnel(dst->lwtstate);
     if (bpf->xmit.prog) {
+        int hh_len = dst->dev->hard_header_len;
         __be16 proto = skb->protocol;
         int ret;

@@ -291,7 +290,7 @@ static int bpf_xmit(struct sk_buff *skb)
         /* If the header was expanded, headroom might be too
          * small for L2 header to come, expand as needed.
          */
-        ret = xmit_check_hhlen(skb);
+        ret = xmit_check_hhlen(skb, hh_len);
         if (unlikely(ret))
             return ret;
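The lwt_bpf.c crash came from reading skb_dst(skb) after the BPF program had run: a bpf_xmit program may call bpf_skb_set_tunnel_key() and install a metadata dst, so dereferencing the old dst's device afterwards can fault. The fix captures hard_header_len before the program runs and passes it into xmit_check_hhlen(). A rough sketch of that ordering, illustrative only and not the kernel's actual call chain:

static int bpf_xmit_order_sketch(struct sk_buff *skb, struct bpf_prog *prog)
{
    /* 1. capture anything that depends on the pre-BPF dst */
    int hh_len = skb_dst(skb)->dev->hard_header_len;

    /* 2. run the program; it may replace the dst, invalidating what
     *    skb_dst() pointed to before
     */
    bpf_prog_run(prog, skb);

    /* 3. use the captured value instead of skb_dst(skb)->dev again */
    if (skb_headroom(skb) < hh_len)
        return pskb_expand_head(skb,
                                HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
                                0, GFP_ATOMIC);
    return 0;
}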
net/dsa/port.c:

@@ -1620,8 +1620,10 @@ int dsa_port_link_register_of(struct dsa_port *dp)
         if (ds->ops->phylink_mac_link_down)
             ds->ops->phylink_mac_link_down(ds, port,
                 MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
+        of_node_put(phy_np);
         return dsa_port_phylink_register(dp);
     }
+    of_node_put(phy_np);
     return 0;
 }
@@ -285,7 +285,7 @@ static void dsa_port_manage_cpu_flood(struct dsa_port *dp)
         if (other_dp->slave->flags & IFF_ALLMULTI)
             flags.val |= BR_MCAST_FLOOD;
         if (other_dp->slave->flags & IFF_PROMISC)
-            flags.val |= BR_FLOOD;
+            flags.val |= BR_FLOOD | BR_MCAST_FLOOD;
     }

     err = dsa_port_pre_bridge_flags(dp, flags, NULL);
net/ipv4/ip_gre.c:

@@ -459,14 +459,12 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
                        __be16 proto)
 {
     struct ip_tunnel *tunnel = netdev_priv(dev);
-
-    if (tunnel->parms.o_flags & TUNNEL_SEQ)
-        tunnel->o_seqno++;
+    __be16 flags = tunnel->parms.o_flags;

     /* Push GRE header. */
     gre_build_header(skb, tunnel->tun_hlen,
-                     tunnel->parms.o_flags, proto, tunnel->parms.o_key,
-                     htonl(tunnel->o_seqno));
+                     flags, proto, tunnel->parms.o_key,
+                     (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);

     ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
 }
@@ -504,7 +502,7 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
              (TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
     gre_build_header(skb, tunnel_hlen, flags, proto,
                      tunnel_id_to_key32(tun_info->key.tun_id),
-                     (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0);
+                     (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);

     ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);

@@ -581,7 +579,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
     }

     gre_build_header(skb, 8, TUNNEL_SEQ,
-                     proto, 0, htonl(tunnel->o_seqno++));
+                     proto, 0, htonl(atomic_fetch_inc(&tunnel->o_seqno)));

     ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
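All three ip_gre.c call sites move the per-packet sequence number to atomic_fetch_inc(). In collect_md mode several CPUs can transmit on the same tunnel device at once, so a plain tunnel->o_seqno++ could hand two packets the same number; returning the pre-increment value also makes native mode start at 0, matching the "make o_seqno start from 0" item in the merge log. A small illustrative sketch, assuming an atomic_t counter as in the patched tunnel struct:

#include <linux/atomic.h>

struct seq_tunnel_sketch {
    atomic_t o_seqno;    /* zero-initialised */
};

static __be32 gre_next_seqno_sketch(struct seq_tunnel_sketch *t, bool seq_enabled)
{
    if (!seq_enabled)
        return 0;

    /* returns the pre-increment value: the first packet carries 0, and
     * two CPUs transmitting concurrently can no longer observe the same
     * sequence number
     */
    return htonl(atomic_fetch_inc(&t->o_seqno));
}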
net/ipv4/syncookies.c:

@@ -281,6 +281,7 @@ bool cookie_ecn_ok(const struct tcp_options_received *tcp_opt,
 EXPORT_SYMBOL(cookie_ecn_ok);

 struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
+                                            const struct tcp_request_sock_ops *af_ops,
                                             struct sock *sk,
                                             struct sk_buff *skb)
 {
@@ -297,6 +298,10 @@ struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
         return NULL;

     treq = tcp_rsk(req);
+
+    /* treq->af_specific might be used to perform TCP_MD5 lookup */
+    treq->af_specific = af_ops;
+
     treq->syn_tos = TCP_SKB_CB(skb)->ip_dsfield;
 #if IS_ENABLED(CONFIG_MPTCP)
     treq->is_mptcp = sk_is_mptcp(sk);
@@ -364,7 +369,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
         goto out;

     ret = NULL;
-    req = cookie_tcp_reqsk_alloc(&tcp_request_sock_ops, sk, skb);
+    req = cookie_tcp_reqsk_alloc(&tcp_request_sock_ops,
+                                 &tcp_request_sock_ipv4_ops, sk, skb);
     if (!req)
         goto out;
net/ipv4/tcp_input.c:

@@ -3867,7 +3867,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
         tcp_process_tlp_ack(sk, ack, flag);

     if (tcp_ack_is_dubious(sk, flag)) {
-        if (!(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP))) {
+        if (!(flag & (FLAG_SND_UNA_ADVANCED |
+                      FLAG_NOT_DUP | FLAG_DSACKING_ACK))) {
             num_dupack = 1;
             /* Consider if pure acks were aggregated in tcp_add_backlog() */
             if (!(flag & FLAG_DATA))
@@ -5454,7 +5455,17 @@ static void tcp_new_space(struct sock *sk)
     INDIRECT_CALL_1(sk->sk_write_space, sk_stream_write_space, sk);
 }

-static void tcp_check_space(struct sock *sk)
+/* Caller made space either from:
+ * 1) Freeing skbs in rtx queues (after tp->snd_una has advanced)
+ * 2) Sent skbs from output queue (and thus advancing tp->snd_nxt)
+ *
+ * We might be able to generate EPOLLOUT to the application if:
+ * 1) Space consumed in output/rtx queues is below sk->sk_sndbuf/2
+ * 2) notsent amount (tp->write_seq - tp->snd_nxt) became
+ *    small enough that tcp_stream_memory_free() decides it
+ *    is time to generate EPOLLOUT.
+ */
+void tcp_check_space(struct sock *sk)
 {
     /* pairs with tcp_poll() */
     smp_mb();
net/ipv4/tcp_minisocks.c:

@@ -531,7 +531,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
     newtp->tsoffset = treq->ts_off;
 #ifdef CONFIG_TCP_MD5SIG
     newtp->md5sig_info = NULL;  /*XXX*/
-    if (newtp->af_specific->md5_lookup(sk, newsk))
+    if (treq->af_specific->req_md5_lookup(sk, req_to_sk(req)))
         newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
 #endif
     if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
net/ipv4/tcp_output.c:

@@ -82,6 +82,7 @@ static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)

     NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
                   tcp_skb_pcount(skb));
+    tcp_check_space(sk);
 }

 /* SND.NXT, if window was not shrunk or the amount of shrunk was less than one
net/ipv4/tcp_rate.c:

@@ -74,27 +74,32 @@ void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb)
 *
 * If an ACK (s)acks multiple skbs (e.g., stretched-acks), this function is
 * called multiple times. We favor the information from the most recently
- * sent skb, i.e., the skb with the highest prior_delivered count.
+ * sent skb, i.e., the skb with the most recently sent time and the highest
+ * sequence.
 */
 void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
                             struct rate_sample *rs)
 {
     struct tcp_sock *tp = tcp_sk(sk);
     struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
+    u64 tx_tstamp;

     if (!scb->tx.delivered_mstamp)
         return;

+    tx_tstamp = tcp_skb_timestamp_us(skb);
     if (!rs->prior_delivered ||
-        after(scb->tx.delivered, rs->prior_delivered)) {
+        tcp_skb_sent_after(tx_tstamp, tp->first_tx_mstamp,
+                           scb->end_seq, rs->last_end_seq)) {
         rs->prior_delivered_ce = scb->tx.delivered_ce;
         rs->prior_delivered  = scb->tx.delivered;
         rs->prior_mstamp     = scb->tx.delivered_mstamp;
         rs->is_app_limited   = scb->tx.is_app_limited;
         rs->is_retrans       = scb->sacked & TCPCB_RETRANS;
+        rs->last_end_seq     = scb->end_seq;

         /* Record send time of most recently ACKed packet: */
-        tp->first_tx_mstamp  = tcp_skb_timestamp_us(skb);
+        tp->first_tx_mstamp  = tx_tstamp;
         /* Find the duration of the "send phase" of this window: */
         rs->interval_us = tcp_stamp_us_delta(tp->first_tx_mstamp,
                                              scb->tx.first_tx_mstamp);
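The rewritten condition calls tcp_skb_sent_after(), which this excerpt does not show (it is added to the TCP headers by the same fix). Going by the updated comment, its job is to prefer the skb with the later send time and break ties on the higher end sequence; a sketch of that comparison under those assumptions, not quoted from the tree:

static inline bool tcp_skb_sent_after_sketch(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
    /* later transmit timestamp wins; equal timestamps (e.g. TSO splits
     * stamped together) fall back to the higher ending sequence number
     */
    return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}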
net/ipv6/ip6_gre.c:

@@ -724,6 +724,7 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
 {
     struct ip6_tnl *tunnel = netdev_priv(dev);
     __be16 protocol;
+    __be16 flags;

     if (dev->type == ARPHRD_ETHER)
         IPCB(skb)->flags = 0;
@@ -739,7 +740,6 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
     if (tunnel->parms.collect_md) {
         struct ip_tunnel_info *tun_info;
         const struct ip_tunnel_key *key;
-        __be16 flags;
         int tun_hlen;

         tun_info = skb_tunnel_info_txcheck(skb);
@@ -766,19 +766,19 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
         gre_build_header(skb, tun_hlen,
                          flags, protocol,
                          tunnel_id_to_key32(tun_info->key.tun_id),
-                         (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++)
+                         (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno))
                                               : 0);

     } else {
-        if (tunnel->parms.o_flags & TUNNEL_SEQ)
-            tunnel->o_seqno++;
-
         if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
             return -ENOMEM;

-        gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
+        flags = tunnel->parms.o_flags;
+
+        gre_build_header(skb, tunnel->tun_hlen, flags,
                          protocol, tunnel->parms.o_key,
-                         htonl(tunnel->o_seqno));
+                         (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno))
                                               : 0);
     }

     return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
@@ -1056,7 +1056,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
     /* Push GRE header. */
     proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN)
                                        : htons(ETH_P_ERSPAN2);
-    gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++));
+    gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(atomic_fetch_inc(&t->o_seqno)));

     /* TooBig packet may have updated dst->dev's mtu */
     if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
net/ipv6/netfilter.c:

@@ -24,14 +24,13 @@ int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct sk_buff *skb)
 {
     const struct ipv6hdr *iph = ipv6_hdr(skb);
     struct sock *sk = sk_to_full_sk(sk_partial);
+    struct net_device *dev = skb_dst(skb)->dev;
     struct flow_keys flkeys;
     unsigned int hh_len;
     struct dst_entry *dst;
     int strict = (ipv6_addr_type(&iph->daddr) &
                   (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
     struct flowi6 fl6 = {
-        .flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
-            strict ? skb_dst(skb)->dev->ifindex : 0,
         .flowi6_mark = skb->mark,
         .flowi6_uid = sock_net_uid(net, sk),
         .daddr = iph->daddr,
@@ -39,6 +38,13 @@ int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct sk_buff *skb)
     };
     int err;

+    if (sk && sk->sk_bound_dev_if)
+        fl6.flowi6_oif = sk->sk_bound_dev_if;
+    else if (strict)
+        fl6.flowi6_oif = dev->ifindex;
+    else
+        fl6.flowi6_oif = l3mdev_master_ifindex(dev);
+
     fib6_rules_early_flow_dissect(net, skb, &fl6, &flkeys);
     dst = ip6_route_output(net, sk, &fl6);
     err = dst->error;
net/ipv6/syncookies.c:

@@ -170,7 +170,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
         goto out;

     ret = NULL;
-    req = cookie_tcp_reqsk_alloc(&tcp6_request_sock_ops, sk, skb);
+    req = cookie_tcp_reqsk_alloc(&tcp6_request_sock_ops,
+                                 &tcp_request_sock_ipv6_ops, sk, skb);
     if (!req)
         goto out;
net/mctp/device.c:

@@ -313,6 +313,7 @@ void mctp_dev_hold(struct mctp_dev *mdev)
 void mctp_dev_put(struct mctp_dev *mdev)
 {
     if (mdev && refcount_dec_and_test(&mdev->refs)) {
+        kfree(mdev->addrs);
         dev_put(mdev->dev);
         kfree_rcu(mdev, rcu);
     }
@@ -441,7 +442,6 @@ static void mctp_unregister(struct net_device *dev)

     mctp_route_remove_dev(mdev);
     mctp_neigh_remove_dev(mdev);
-    kfree(mdev->addrs);

     mctp_dev_put(mdev);
 }
net/netfilter/ipvs/ip_vs_conn.c:

@@ -1495,7 +1495,7 @@ int __init ip_vs_conn_init(void)
     pr_info("Connection hash table configured "
             "(size=%d, memory=%ldKbytes)\n",
             ip_vs_conn_tab_size,
-            (long)(ip_vs_conn_tab_size*sizeof(struct list_head))/1024);
+            (long)(ip_vs_conn_tab_size*sizeof(*ip_vs_conn_tab))/1024);
     IP_VS_DBG(0, "Each connection entry needs %zd bytes at least\n",
               sizeof(struct ip_vs_conn));
net/netfilter/nf_conntrack_proto_tcp.c:

@@ -556,24 +556,14 @@ static bool tcp_in_window(struct nf_conn *ct,
             }

         }
-    } else if (((state->state == TCP_CONNTRACK_SYN_SENT
-                 && dir == IP_CT_DIR_ORIGINAL)
-               || (state->state == TCP_CONNTRACK_SYN_RECV
-                 && dir == IP_CT_DIR_REPLY))
-               && after(end, sender->td_end)) {
+    } else if (tcph->syn &&
+               after(end, sender->td_end) &&
+               (state->state == TCP_CONNTRACK_SYN_SENT ||
+                state->state == TCP_CONNTRACK_SYN_RECV)) {
         /*
          * RFC 793: "if a TCP is reinitialized ... then it need
          * not wait at all; it must only be sure to use sequence
          * numbers larger than those recently used."
-         */
-        sender->td_end =
-        sender->td_maxend = end;
-        sender->td_maxwin = (win == 0 ? 1 : win);
-
-        tcp_options(skb, dataoff, tcph, sender);
-    } else if (tcph->syn && dir == IP_CT_DIR_REPLY &&
-               state->state == TCP_CONNTRACK_SYN_SENT) {
-        /* Retransmitted syn-ack, or syn (simultaneous open).
-         *
+         *
          * Re-init state for this direction, just like for the first
          * syn(-ack) reply, it might differ in seq, ack or tcp options.
@@ -581,7 +571,8 @@ static bool tcp_in_window(struct nf_conn *ct,
         tcp_init_sender(sender, receiver,
                         skb, dataoff, tcph,
                         end, win);
-        if (!tcph->ack)
+
+        if (dir == IP_CT_DIR_REPLY && !tcph->ack)
             return true;
     }
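The rewritten branch re-initializes the per-direction tracking state only for SYN packets that advance beyond everything previously seen from that sender while the connection is still in a SYN state; restricting the re-init to actual SYNs is what resolves the TCP fastopen mis-tracking mentioned in the merge log. As a standalone predicate, the new condition reads as below (an illustrative restatement, not kernel source):

static bool may_reinit_sender_sketch(bool syn, u32 end, u32 td_end,
                                     int conntrack_state)
{
    return syn &&
           after(end, td_end) &&
           (conntrack_state == TCP_CONNTRACK_SYN_SENT ||
            conntrack_state == TCP_CONNTRACK_SYN_RECV);
}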
net/netfilter/nf_conntrack_standalone.c:

@@ -823,7 +823,7 @@ static struct ctl_table nf_ct_sysctl_table[] = {
         .mode           = 0644,
         .proc_handler   = proc_dointvec_jiffies,
     },
-#if IS_ENABLED(CONFIG_NFT_FLOW_OFFLOAD)
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
     [NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD] = {
         .procname       = "nf_flowtable_udp_timeout",
         .maxlen         = sizeof(unsigned int),
net/netfilter/nft_set_rbtree.c:

@@ -349,7 +349,11 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
                 *ext = &rbe->ext;
                 return -EEXIST;
             } else {
-                p = &parent->rb_left;
+                overlap = false;
+                if (nft_rbtree_interval_end(rbe))
+                    p = &parent->rb_left;
+                else
+                    p = &parent->rb_right;
             }
         }
net/netfilter/nft_socket.c:

@@ -54,6 +54,32 @@ nft_sock_get_eval_cgroupv2(u32 *dest, struct sock *sk, const struct nft_pktinfo
 }
 #endif

+static struct sock *nft_socket_do_lookup(const struct nft_pktinfo *pkt)
+{
+    const struct net_device *indev = nft_in(pkt);
+    const struct sk_buff *skb = pkt->skb;
+    struct sock *sk = NULL;
+
+    if (!indev)
+        return NULL;
+
+    switch (nft_pf(pkt)) {
+    case NFPROTO_IPV4:
+        sk = nf_sk_lookup_slow_v4(nft_net(pkt), skb, indev);
+        break;
+#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
+    case NFPROTO_IPV6:
+        sk = nf_sk_lookup_slow_v6(nft_net(pkt), skb, indev);
+        break;
+#endif
+    default:
+        WARN_ON_ONCE(1);
+        break;
+    }
+
+    return sk;
+}
+
 static void nft_socket_eval(const struct nft_expr *expr,
                             struct nft_regs *regs,
                             const struct nft_pktinfo *pkt)
@@ -67,20 +93,7 @@ static void nft_socket_eval(const struct nft_expr *expr,
         sk = NULL;

     if (!sk)
-        switch(nft_pf(pkt)) {
-        case NFPROTO_IPV4:
-            sk = nf_sk_lookup_slow_v4(nft_net(pkt), skb, nft_in(pkt));
-            break;
-#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
-        case NFPROTO_IPV6:
-            sk = nf_sk_lookup_slow_v6(nft_net(pkt), skb, nft_in(pkt));
-            break;
-#endif
-        default:
-            WARN_ON_ONCE(1);
-            regs->verdict.code = NFT_BREAK;
-            return;
-        }
+        sk = nft_socket_do_lookup(pkt);

     if (!sk) {
         regs->verdict.code = NFT_BREAK;
@@ -224,6 +237,16 @@ static bool nft_socket_reduce(struct nft_regs_track *track,
     return nft_expr_reduce_bitwise(track, expr);
 }

+static int nft_socket_validate(const struct nft_ctx *ctx,
+                               const struct nft_expr *expr,
+                               const struct nft_data **data)
+{
+    return nft_chain_validate_hooks(ctx->chain,
+                                    (1 << NF_INET_PRE_ROUTING) |
+                                    (1 << NF_INET_LOCAL_IN) |
+                                    (1 << NF_INET_LOCAL_OUT));
+}
+
 static struct nft_expr_type nft_socket_type;
 static const struct nft_expr_ops nft_socket_ops = {
     .type           = &nft_socket_type,
@@ -231,6 +254,7 @@ static const struct nft_expr_ops nft_socket_ops = {
     .eval           = nft_socket_eval,
     .init           = nft_socket_init,
     .dump           = nft_socket_dump,
+    .validate       = nft_socket_validate,
     .reduce         = nft_socket_reduce,
 };
net/sctp/sm_sideeffect.c:

@@ -458,6 +458,10 @@ void sctp_generate_reconf_event(struct timer_list *t)
         goto out_unlock;
     }

+    /* This happens when the response arrives after the timer is triggered. */
+    if (!asoc->strreset_chunk)
+        goto out_unlock;
+
     error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
                        SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_RECONF),
                        asoc->state, asoc->ep, asoc,
net/smc/af_smc.c (137 lines changed):

@@ -243,11 +243,27 @@ struct proto smc_proto6 = {
 };
 EXPORT_SYMBOL_GPL(smc_proto6);

+static void smc_fback_restore_callbacks(struct smc_sock *smc)
+{
+    struct sock *clcsk = smc->clcsock->sk;
+
+    write_lock_bh(&clcsk->sk_callback_lock);
+    clcsk->sk_user_data = NULL;
+
+    smc_clcsock_restore_cb(&clcsk->sk_state_change, &smc->clcsk_state_change);
+    smc_clcsock_restore_cb(&clcsk->sk_data_ready, &smc->clcsk_data_ready);
+    smc_clcsock_restore_cb(&clcsk->sk_write_space, &smc->clcsk_write_space);
+    smc_clcsock_restore_cb(&clcsk->sk_error_report, &smc->clcsk_error_report);
+
+    write_unlock_bh(&clcsk->sk_callback_lock);
+}
+
 static void smc_restore_fallback_changes(struct smc_sock *smc)
 {
     if (smc->clcsock->file) { /* non-accepted sockets have no file yet */
         smc->clcsock->file->private_data = smc->sk.sk_socket;
         smc->clcsock->file = NULL;
+        smc_fback_restore_callbacks(smc);
     }
 }

@@ -373,6 +389,7 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
     sk->sk_prot->hash(sk);
     sk_refcnt_debug_inc(sk);
     mutex_init(&smc->clcsock_release_lock);
+    smc_init_saved_callbacks(smc);

     return sk;
 }
@@ -744,47 +761,73 @@ out:

 static void smc_fback_state_change(struct sock *clcsk)
 {
-    struct smc_sock *smc =
-        smc_clcsock_user_data(clcsk);
+    struct smc_sock *smc;

-    if (!smc)
-        return;
-    smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_state_change);
+    read_lock_bh(&clcsk->sk_callback_lock);
+    smc = smc_clcsock_user_data(clcsk);
+    if (smc)
+        smc_fback_forward_wakeup(smc, clcsk,
+                                 smc->clcsk_state_change);
+    read_unlock_bh(&clcsk->sk_callback_lock);
 }

 static void smc_fback_data_ready(struct sock *clcsk)
 {
-    struct smc_sock *smc =
-        smc_clcsock_user_data(clcsk);
+    struct smc_sock *smc;

-    if (!smc)
-        return;
-    smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_data_ready);
+    read_lock_bh(&clcsk->sk_callback_lock);
+    smc = smc_clcsock_user_data(clcsk);
+    if (smc)
+        smc_fback_forward_wakeup(smc, clcsk,
+                                 smc->clcsk_data_ready);
+    read_unlock_bh(&clcsk->sk_callback_lock);
 }

 static void smc_fback_write_space(struct sock *clcsk)
 {
-    struct smc_sock *smc =
-        smc_clcsock_user_data(clcsk);
+    struct smc_sock *smc;

-    if (!smc)
-        return;
-    smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_write_space);
+    read_lock_bh(&clcsk->sk_callback_lock);
+    smc = smc_clcsock_user_data(clcsk);
+    if (smc)
+        smc_fback_forward_wakeup(smc, clcsk,
+                                 smc->clcsk_write_space);
+    read_unlock_bh(&clcsk->sk_callback_lock);
 }

 static void smc_fback_error_report(struct sock *clcsk)
 {
-    struct smc_sock *smc =
-        smc_clcsock_user_data(clcsk);
+    struct smc_sock *smc;

-    if (!smc)
-        return;
-    smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_error_report);
+    read_lock_bh(&clcsk->sk_callback_lock);
+    smc = smc_clcsock_user_data(clcsk);
+    if (smc)
+        smc_fback_forward_wakeup(smc, clcsk,
+                                 smc->clcsk_error_report);
+    read_unlock_bh(&clcsk->sk_callback_lock);
 }

+static void smc_fback_replace_callbacks(struct smc_sock *smc)
+{
+    struct sock *clcsk = smc->clcsock->sk;
+
+    write_lock_bh(&clcsk->sk_callback_lock);
+    clcsk->sk_user_data = (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
+
+    smc_clcsock_replace_cb(&clcsk->sk_state_change, smc_fback_state_change,
+                           &smc->clcsk_state_change);
+    smc_clcsock_replace_cb(&clcsk->sk_data_ready, smc_fback_data_ready,
+                           &smc->clcsk_data_ready);
+    smc_clcsock_replace_cb(&clcsk->sk_write_space, smc_fback_write_space,
+                           &smc->clcsk_write_space);
+    smc_clcsock_replace_cb(&clcsk->sk_error_report, smc_fback_error_report,
+                           &smc->clcsk_error_report);
+
+    write_unlock_bh(&clcsk->sk_callback_lock);
+}
+
 static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
 {
-    struct sock *clcsk;
     int rc = 0;

     mutex_lock(&smc->clcsock_release_lock);
@@ -792,10 +835,7 @@ static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
         rc = -EBADF;
         goto out;
     }
-    clcsk = smc->clcsock->sk;
-
     if (smc->use_fallback)
         goto out;
     smc->use_fallback = true;
     smc->fallback_rsn = reason_code;
     smc_stat_fallback(smc);
@@ -810,18 +850,7 @@ static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
          * in smc sk->sk_wq and they should be woken up
          * as clcsock's wait queue is woken up.
          */
-        smc->clcsk_state_change = clcsk->sk_state_change;
-        smc->clcsk_data_ready = clcsk->sk_data_ready;
-        smc->clcsk_write_space = clcsk->sk_write_space;
-        smc->clcsk_error_report = clcsk->sk_error_report;
-
-        clcsk->sk_state_change = smc_fback_state_change;
-        clcsk->sk_data_ready = smc_fback_data_ready;
-        clcsk->sk_write_space = smc_fback_write_space;
-        clcsk->sk_error_report = smc_fback_error_report;
-
-        smc->clcsock->sk->sk_user_data =
-            (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
+        smc_fback_replace_callbacks(smc);
     }
 out:
     mutex_unlock(&smc->clcsock_release_lock);
@@ -1475,6 +1504,8 @@ static void smc_connect_work(struct work_struct *work)
         smc->sk.sk_state = SMC_CLOSED;
         if (rc == -EPIPE || rc == -EAGAIN)
             smc->sk.sk_err = EPIPE;
+        else if (rc == -ECONNREFUSED)
+            smc->sk.sk_err = ECONNREFUSED;
         else if (signal_pending(current))
             smc->sk.sk_err = -sock_intr_errno(timeo);
         sock_put(&smc->sk); /* passive closing */
@@ -1594,6 +1625,19 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
      * function; switch it back to the original sk_data_ready function
      */
     new_clcsock->sk->sk_data_ready = lsmc->clcsk_data_ready;
+
+    /* if new clcsock has also inherited the fallback-specific callback
+     * functions, switch them back to the original ones.
+     */
+    if (lsmc->use_fallback) {
+        if (lsmc->clcsk_state_change)
+            new_clcsock->sk->sk_state_change = lsmc->clcsk_state_change;
+        if (lsmc->clcsk_write_space)
+            new_clcsock->sk->sk_write_space = lsmc->clcsk_write_space;
+        if (lsmc->clcsk_error_report)
+            new_clcsock->sk->sk_error_report = lsmc->clcsk_error_report;
+    }
+
     (*new_smc)->clcsock = new_clcsock;
 out:
     return rc;
@@ -2353,17 +2397,20 @@ out:

 static void smc_clcsock_data_ready(struct sock *listen_clcsock)
 {
-    struct smc_sock *lsmc =
-        smc_clcsock_user_data(listen_clcsock);
+    struct smc_sock *lsmc;

+    read_lock_bh(&listen_clcsock->sk_callback_lock);
+    lsmc = smc_clcsock_user_data(listen_clcsock);
     if (!lsmc)
-        return;
+        goto out;
     lsmc->clcsk_data_ready(listen_clcsock);
     if (lsmc->sk.sk_state == SMC_LISTEN) {
         sock_hold(&lsmc->sk); /* sock_put in smc_tcp_listen_work() */
         if (!queue_work(smc_tcp_ls_wq, &lsmc->tcp_listen_work))
             sock_put(&lsmc->sk);
     }
+out:
+    read_unlock_bh(&listen_clcsock->sk_callback_lock);
 }

 static int smc_listen(struct socket *sock, int backlog)
@@ -2395,10 +2442,12 @@ static int smc_listen(struct socket *sock, int backlog)
     /* save original sk_data_ready function and establish
      * smc-specific sk_data_ready function
      */
-    smc->clcsk_data_ready = smc->clcsock->sk->sk_data_ready;
-    smc->clcsock->sk->sk_data_ready = smc_clcsock_data_ready;
+    write_lock_bh(&smc->clcsock->sk->sk_callback_lock);
     smc->clcsock->sk->sk_user_data =
         (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
+    smc_clcsock_replace_cb(&smc->clcsock->sk->sk_data_ready,
+                           smc_clcsock_data_ready, &smc->clcsk_data_ready);
+    write_unlock_bh(&smc->clcsock->sk->sk_callback_lock);

     /* save original ops */
     smc->ori_af_ops = inet_csk(smc->clcsock->sk)->icsk_af_ops;
@@ -2413,7 +2462,11 @@ static int smc_listen(struct socket *sock, int backlog)

     rc = kernel_listen(smc->clcsock, backlog);
     if (rc) {
-        smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
+        write_lock_bh(&smc->clcsock->sk->sk_callback_lock);
+        smc_clcsock_restore_cb(&smc->clcsock->sk->sk_data_ready,
+                               &smc->clcsk_data_ready);
+        smc->clcsock->sk->sk_user_data = NULL;
+        write_unlock_bh(&smc->clcsock->sk->sk_callback_lock);
         goto out;
     }
     sk->sk_max_ack_backlog = backlog;
net/smc/smc.h:

@@ -288,12 +288,41 @@ static inline struct smc_sock *smc_sk(const struct sock *sk)
     return (struct smc_sock *)sk;
 }

+static inline void smc_init_saved_callbacks(struct smc_sock *smc)
+{
+    smc->clcsk_state_change = NULL;
+    smc->clcsk_data_ready   = NULL;
+    smc->clcsk_write_space  = NULL;
+    smc->clcsk_error_report = NULL;
+}
+
 static inline struct smc_sock *smc_clcsock_user_data(const struct sock *clcsk)
 {
     return (struct smc_sock *)
            ((uintptr_t)clcsk->sk_user_data & ~SK_USER_DATA_NOCOPY);
 }

+/* save target_cb in saved_cb, and replace target_cb with new_cb */
+static inline void smc_clcsock_replace_cb(void (**target_cb)(struct sock *),
+                                          void (*new_cb)(struct sock *),
+                                          void (**saved_cb)(struct sock *))
+{
+    /* only save once */
+    if (!*saved_cb)
+        *saved_cb = *target_cb;
+    *target_cb = new_cb;
+}
+
+/* restore target_cb to saved_cb, and reset saved_cb to NULL */
+static inline void smc_clcsock_restore_cb(void (**target_cb)(struct sock *),
+                                          void (**saved_cb)(struct sock *))
+{
+    if (!*saved_cb)
+        return;
+    *target_cb = *saved_cb;
+    *saved_cb = NULL;
+}
+
 extern struct workqueue_struct *smc_hs_wq;     /* wq for handshake work */
 extern struct workqueue_struct *smc_close_wq;  /* wq for close work */
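These two helpers are what let the listen and fallback paths swap socket callbacks idempotently: replace saves the original pointer only the first time, restore is a no-op unless something was saved, and the af_smc.c hunks always wrap both in sk_callback_lock so a concurrent wakeup sees a consistent (sk_user_data, callback) pair. A short usage sketch that mirrors the smc_listen()/smc_close_active() hunks above (the example_ functions are illustrative, not an additional kernel API):

static void example_install_data_ready(struct smc_sock *smc)
{
    struct sock *clcsk = smc->clcsock->sk;

    write_lock_bh(&clcsk->sk_callback_lock);
    clcsk->sk_user_data = (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
    smc_clcsock_replace_cb(&clcsk->sk_data_ready,
                           smc_clcsock_data_ready, &smc->clcsk_data_ready);
    write_unlock_bh(&clcsk->sk_callback_lock);
}

static void example_remove_data_ready(struct smc_sock *smc)
{
    struct sock *clcsk = smc->clcsock->sk;

    write_lock_bh(&clcsk->sk_callback_lock);
    smc_clcsock_restore_cb(&clcsk->sk_data_ready, &smc->clcsk_data_ready);
    clcsk->sk_user_data = NULL;
    write_unlock_bh(&clcsk->sk_callback_lock);
}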
net/smc/smc_close.c:

@@ -214,8 +214,11 @@ again:
         sk->sk_state = SMC_CLOSED;
         sk->sk_state_change(sk); /* wake up accept */
         if (smc->clcsock && smc->clcsock->sk) {
-            smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
+            write_lock_bh(&smc->clcsock->sk->sk_callback_lock);
+            smc_clcsock_restore_cb(&smc->clcsock->sk->sk_data_ready,
+                                   &smc->clcsk_data_ready);
             smc->clcsock->sk->sk_user_data = NULL;
+            write_unlock_bh(&smc->clcsock->sk->sk_callback_lock);
             rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
         }
         smc_close_cleanup_listen(sk);
net/tls/tls_device.c:

@@ -483,11 +483,13 @@ handle_error:
         copy = min_t(size_t, size, (pfrag->size - pfrag->offset));
         copy = min_t(size_t, copy, (max_open_record_len - record->len));

-        rc = tls_device_copy_data(page_address(pfrag->page) +
-                                  pfrag->offset, copy, msg_iter);
-        if (rc)
-            goto handle_error;
-        tls_append_frag(record, pfrag, copy);
+        if (copy) {
+            rc = tls_device_copy_data(page_address(pfrag->page) +
+                                      pfrag->offset, copy, msg_iter);
+            if (rc)
+                goto handle_error;
+            tls_append_frag(record, pfrag, copy);
+        }

         size -= copy;
         if (!size) {
net/xdp/xsk.c:

@@ -639,7 +639,7 @@ static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len
     if (sk_can_busy_loop(sk))
         sk_busy_loop(sk, 1); /* only support non-blocking sockets */

-    if (xsk_no_wakeup(sk))
+    if (xs->zc && xsk_no_wakeup(sk))
         return 0;

     pool = xs->pool;
@@ -967,6 +967,19 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)

             xp_get_pool(umem_xs->pool);
             xs->pool = umem_xs->pool;
+
+            /* If underlying shared umem was created without Tx
+             * ring, allocate Tx descs array that Tx batching API
+             * utilizes
+             */
+            if (xs->tx && !xs->pool->tx_descs) {
+                err = xp_alloc_tx_descs(xs->pool, xs);
+                if (err) {
+                    xp_put_pool(xs->pool);
+                    sockfd_put(sock);
+                    goto out_unlock;
+                }
+            }
         }

         xdp_get_umem(umem_xs->umem);
net/xdp/xsk_buff_pool.c:

@@ -42,6 +42,16 @@ void xp_destroy(struct xsk_buff_pool *pool)
     kvfree(pool);
 }

+int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs)
+{
+    pool->tx_descs = kvcalloc(xs->tx->nentries, sizeof(*pool->tx_descs),
+                              GFP_KERNEL);
+    if (!pool->tx_descs)
+        return -ENOMEM;
+
+    return 0;
+}
+
 struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
                                                 struct xdp_umem *umem)
 {
@@ -59,11 +69,9 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
     if (!pool->heads)
         goto out;

-    if (xs->tx) {
-        pool->tx_descs = kcalloc(xs->tx->nentries, sizeof(*pool->tx_descs), GFP_KERNEL);
-        if (!pool->tx_descs)
+    if (xs->tx)
+        if (xp_alloc_tx_descs(pool, xs))
             goto out;
-    }

     pool->chunk_mask = ~((u64)umem->chunk_size - 1);
     pool->addrs_cnt = umem->size;
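The "possible crash when multiple sockets are created" came from the Tx descriptor array only being allocated when the pool was first created with a Tx ring: a second socket sharing that umem but bringing its own Tx ring found pool->tx_descs NULL. With xp_alloc_tx_descs() factored out, the xsk.c bind path above performs the same allocation on demand. A sketch of that bind-time decision (an illustrative restatement, not a new kernel API):

static int shared_pool_bind_tx_sketch(struct xsk_buff_pool *pool,
                                      struct xdp_sock *xs)
{
    if (!xs->tx || pool->tx_descs)
        return 0;                        /* nothing to do */

    return xp_alloc_tx_descs(pool, xs);  /* may return -ENOMEM */
}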
Two kernel config fragments (selftest configs), each gaining the same line:

@@ -1,3 +1,4 @@
+CONFIG_ACPI=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_CMDLINE_BOOL=y

@@ -1,3 +1,4 @@
+CONFIG_ACPI=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_CMDLINE_BOOL=y