Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Merge in the left-over fixes before the net-next pull-request.

net/mptcp/subflow.c
  d3295fee3c ("mptcp: use proper req destructor for IPv6")
  36b122baf6 ("mptcp: add subflow_v(4,6)_send_synack()")

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
commit b11919e1bb
@@ -7,7 +7,8 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Freescale INTMUX interrupt multiplexer
 
 maintainers:
-  - Joakim Zhang <qiangqing.zhang@nxp.com>
+  - Shawn Guo <shawnguo@kernel.org>
+  - NXP Linux Team <linux-imx@nxp.com>
 
 properties:
   compatible:
@@ -7,7 +7,9 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Freescale Fast Ethernet Controller (FEC)
 
 maintainers:
-  - Joakim Zhang <qiangqing.zhang@nxp.com>
+  - Shawn Guo <shawnguo@kernel.org>
+  - Wei Fang <wei.fang@nxp.com>
+  - NXP Linux Team <linux-imx@nxp.com>
 
 allOf:
   - $ref: ethernet-controller.yaml#
@@ -7,7 +7,9 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: NXP i.MX8 DWMAC glue layer
 
 maintainers:
-  - Joakim Zhang <qiangqing.zhang@nxp.com>
+  - Clark Wang <xiaoning.wang@nxp.com>
+  - Shawn Guo <shawnguo@kernel.org>
+  - NXP Linux Team <linux-imx@nxp.com>
 
 # We need a select here so we don't match all nodes with 'snps,dwmac'
 select:
@@ -8188,7 +8188,10 @@ S:	Maintained
 F:	drivers/i2c/busses/i2c-cpm.c
 
 FREESCALE IMX / MXC FEC DRIVER
-M:	Joakim Zhang <qiangqing.zhang@nxp.com>
+M:	Wei Fang <wei.fang@nxp.com>
+R:	Shenwei Wang <shenwei.wang@nxp.com>
+R:	Clark Wang <xiaoning.wang@nxp.com>
+R:	NXP Linux Team <linux-imx@nxp.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/net/fsl,fec.yaml
@@ -1005,9 +1005,11 @@ static void lan9303_get_ethtool_stats(struct dsa_switch *ds, int port,
 		ret = lan9303_read_switch_port(
 			chip, port, lan9303_mib[u].offset, &reg);
 
-		if (ret)
+		if (ret) {
 			dev_warn(chip->dev, "Reading status port %d reg %u failed\n",
 				 port, lan9303_mib[u].offset);
+			reg = 0;
+		}
 		data[u] = reg;
 	}
 }
@@ -824,7 +824,7 @@ lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	lp->memcpy_f( PKTBUF_ADDR(head), (void *)skb->data, skb->len );
 	head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP;
 	dev->stats.tx_bytes += skb->len;
-	dev_kfree_skb( skb );
+	dev_consume_skb_irq(skb);
 	lp->cur_tx++;
 	while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) {
 		lp->cur_tx -= TX_RING_SIZE;
@@ -1001,7 +1001,7 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
 		skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len);
 		lp->tx_ring[entry].base =
 			((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
-		dev_kfree_skb(skb);
+		dev_consume_skb_irq(skb);
 	} else {
 		lp->tx_skbuff[entry] = skb;
 		lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
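The two lance hunks above, and the bmac, mace, s2io, scc, xilinx_emaclite and ntb_netdev hunks below, all address the same bug class: dev_kfree_skb() must not be called in hardirq context or with hardware interrupts disabled, since it may run the skb destructor immediately. The *_irq() variants instead queue the skb to the per-CPU completion list and free it from softirq (dev_consume_skb_irq() for a successfully transmitted frame, dev_kfree_skb_irq() for a drop, so drop-monitor accounting stays correct), and dev_kfree_skb_any() picks the right path at runtime. The dnet hunk below takes the other valid approach and simply moves the free outside the locked region. A minimal sketch of the convention; toy_priv and its lock are illustrative, not from any driver in this merge:

/* Illustrative only: a hypothetical xmit path showing which free
 * variant matches which context.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct toy_priv {
	spinlock_t lock;
};

static netdev_tx_t toy_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct toy_priv *priv = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	/* ... copy skb->data into the device's transmit ring ... */

	/* IRQs are off here, so the skb must take the deferred free
	 * path. Plain dev_kfree_skb() is only safe in process or BH
	 * context with IRQs enabled; dev_kfree_skb_any() chooses at
	 * runtime when the calling context varies.
	 */
	dev_consume_skb_irq(skb);
	spin_unlock_irqrestore(&priv->lock, flags);

	return NETDEV_TX_OK;
}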
@@ -189,6 +189,7 @@ enum xgbe_sfp_cable {
 	XGBE_SFP_CABLE_UNKNOWN = 0,
 	XGBE_SFP_CABLE_ACTIVE,
 	XGBE_SFP_CABLE_PASSIVE,
+	XGBE_SFP_CABLE_FIBER,
 };
 
 enum xgbe_sfp_base {
@@ -236,10 +237,7 @@ enum xgbe_sfp_speed {
 
 #define XGBE_SFP_BASE_BR			12
 #define XGBE_SFP_BASE_BR_1GBE_MIN		0x0a
-#define XGBE_SFP_BASE_BR_1GBE_MAX		0x0d
 #define XGBE_SFP_BASE_BR_10GBE_MIN		0x64
-#define XGBE_SFP_BASE_BR_10GBE_MAX		0x68
-#define XGBE_MOLEX_SFP_BASE_BR_10GBE_MAX	0x78
 
 #define XGBE_SFP_BASE_CU_CABLE_LEN		18
@@ -826,29 +824,22 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
 static bool xgbe_phy_sfp_bit_rate(struct xgbe_sfp_eeprom *sfp_eeprom,
 				  enum xgbe_sfp_speed sfp_speed)
 {
-	u8 *sfp_base, min, max;
+	u8 *sfp_base, min;
 
 	sfp_base = sfp_eeprom->base;
 
 	switch (sfp_speed) {
 	case XGBE_SFP_SPEED_1000:
 		min = XGBE_SFP_BASE_BR_1GBE_MIN;
-		max = XGBE_SFP_BASE_BR_1GBE_MAX;
 		break;
 	case XGBE_SFP_SPEED_10000:
 		min = XGBE_SFP_BASE_BR_10GBE_MIN;
-		if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME],
-			   XGBE_MOLEX_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN) == 0)
-			max = XGBE_MOLEX_SFP_BASE_BR_10GBE_MAX;
-		else
-			max = XGBE_SFP_BASE_BR_10GBE_MAX;
 		break;
 	default:
 		return false;
 	}
 
-	return ((sfp_base[XGBE_SFP_BASE_BR] >= min) &&
-		(sfp_base[XGBE_SFP_BASE_BR] <= max));
+	return sfp_base[XGBE_SFP_BASE_BR] >= min;
 }
 
 static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata)
@@ -1149,16 +1140,18 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
 	phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data);
 	phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data);
 
-	/* Assume ACTIVE cable unless told it is PASSIVE */
+	/* Assume FIBER cable unless told otherwise */
 	if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_PASSIVE) {
 		phy_data->sfp_cable = XGBE_SFP_CABLE_PASSIVE;
 		phy_data->sfp_cable_len = sfp_base[XGBE_SFP_BASE_CU_CABLE_LEN];
-	} else {
+	} else if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_ACTIVE) {
 		phy_data->sfp_cable = XGBE_SFP_CABLE_ACTIVE;
+	} else {
+		phy_data->sfp_cable = XGBE_SFP_CABLE_FIBER;
 	}
 
 	/* Determine the type of SFP */
-	if (phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE &&
+	if (phy_data->sfp_cable != XGBE_SFP_CABLE_FIBER &&
 	    xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000))
 		phy_data->sfp_base = XGBE_SFP_BASE_10000_CR;
 	else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR)
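For context on the bit-rate change: SFF-8472 byte 12 of the base EEPROM encodes the nominal bit rate in units of 100 MBd, so 0x0a is 1 GBd and 0x64 is 10 GBd. Checking only the minimum lets cables that advertise a higher-than-nominal rate, such as the Molex parts previously special-cased at 0x78 (12 GBd), match without per-vendor exceptions. A standalone sketch of the decode; the names are illustrative, not the driver's:

/* Sketch: decode the SFF-8472 nominal bit rate (byte 12, 100 MBd units). */
#include <stdbool.h>
#include <stdint.h>

#define SFP_BASE_BR		12
#define SFP_BR_1GBE_MIN		0x0a	/* 1.0 GBd  */
#define SFP_BR_10GBE_MIN	0x64	/* 10.0 GBd */

static bool sfp_supports_10g(const uint8_t *base)
{
	/* Floor check only: a 0x78 (12 GBd) Molex cable now passes too. */
	return base[SFP_BASE_BR] >= SFP_BR_10GBE_MIN;
}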
@@ -1510,7 +1510,7 @@ static void bmac_tx_timeout(struct timer_list *t)
 	i = bp->tx_empty;
 	++dev->stats.tx_errors;
 	if (i != bp->tx_fill) {
-		dev_kfree_skb(bp->tx_bufs[i]);
+		dev_kfree_skb_irq(bp->tx_bufs[i]);
 		bp->tx_bufs[i] = NULL;
 		if (++i >= N_TX_RING) i = 0;
 		bp->tx_empty = i;
@@ -846,7 +846,7 @@ static void mace_tx_timeout(struct timer_list *t)
 	if (mp->tx_bad_runt) {
 		mp->tx_bad_runt = 0;
 	} else if (i != mp->tx_fill) {
-		dev_kfree_skb(mp->tx_bufs[i]);
+		dev_kfree_skb_irq(mp->tx_bufs[i]);
 		if (++i >= N_TX_RING)
 			i = 0;
 		mp->tx_empty = i;
@@ -550,11 +550,11 @@ static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	skb_tx_timestamp(skb);
 
+	spin_unlock_irqrestore(&bp->lock, flags);
+
 	/* free the buffer */
 	dev_kfree_skb(skb);
 
-	spin_unlock_irqrestore(&bp->lock, flags);
-
 	return NETDEV_TX_OK;
 }
@@ -3693,6 +3693,24 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
 	return err;
 }
 
+/**
+ * i40e_calculate_vsi_rx_buf_len - Calculates buffer length
+ *
+ * @vsi: VSI to calculate rx_buf_len from
+ */
+static u16 i40e_calculate_vsi_rx_buf_len(struct i40e_vsi *vsi)
+{
+	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
+		return I40E_RXBUFFER_2048;
+
+#if (PAGE_SIZE < 8192)
+	if (!I40E_2K_TOO_SMALL_WITH_PADDING && vsi->netdev->mtu <= ETH_DATA_LEN)
+		return I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+#endif
+
+	return PAGE_SIZE < 8192 ? I40E_RXBUFFER_3072 : I40E_RXBUFFER_2048;
+}
+
 /**
  * i40e_vsi_configure_rx - Configure the VSI for Rx
  * @vsi: the VSI being configured
@@ -3704,20 +3722,14 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
 	int err = 0;
 	u16 i;
 
-	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
-		vsi->max_frame = I40E_MAX_RXBUFFER;
-		vsi->rx_buf_len = I40E_RXBUFFER_2048;
+	vsi->max_frame = I40E_MAX_RXBUFFER;
+	vsi->rx_buf_len = i40e_calculate_vsi_rx_buf_len(vsi);
 
 #if (PAGE_SIZE < 8192)
-	} else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
-		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
+	if (vsi->netdev && !I40E_2K_TOO_SMALL_WITH_PADDING &&
+	    vsi->netdev->mtu <= ETH_DATA_LEN)
 		vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
-		vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
 #endif
-	} else {
-		vsi->max_frame = I40E_MAX_RXBUFFER;
-		vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
-						       I40E_RXBUFFER_2048;
-	}
 
 	/* set up individual rings */
 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
@@ -13309,7 +13321,7 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
 	int i;
 
 	/* Don't allow frames that span over multiple buffers */
-	if (frame_size > vsi->rx_buf_len) {
+	if (frame_size > i40e_calculate_vsi_rx_buf_len(vsi)) {
 		NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
 		return -EINVAL;
 	}
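The point of the new helper is that the XDP MTU check in i40e_xdp_setup() and the ring setup in i40e_vsi_configure_rx() now derive the buffer length from the same logic. The old check read vsi->rx_buf_len directly, a field that is only populated once the rings have actually been configured, so the two sites could disagree and the check could act on a stale value.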
@@ -1184,10 +1184,13 @@ static int mcs_register_interrupts(struct mcs *mcs)
 	mcs->tx_sa_active = alloc_mem(mcs, mcs->hw->sc_entries);
 	if (!mcs->tx_sa_active) {
 		ret = -ENOMEM;
-		goto exit;
+		goto free_irq;
 	}
 
 	return ret;
 
+free_irq:
+	free_irq(pci_irq_vector(mcs->pdev, MCS_INT_VEC_IP), mcs);
 exit:
 	pci_free_irq_vectors(mcs->pdev);
 	mcs->num_vec = 0;
@@ -1589,6 +1592,7 @@ static void mcs_remove(struct pci_dev *pdev)
 
 	/* Set MCS to external bypass */
 	mcs_set_external_bypass(mcs, true);
+	free_irq(pci_irq_vector(pdev, MCS_INT_VEC_IP), mcs);
 	pci_free_irq_vectors(pdev);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
@@ -2386,7 +2386,7 @@ static void free_tx_buffers(struct s2io_nic *nic)
 			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
 			if (skb) {
 				swstats->mem_freed += skb->truesize;
-				dev_kfree_skb(skb);
+				dev_kfree_skb_irq(skb);
 				cnt++;
 			}
 		}
@@ -221,6 +221,8 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
 	return 0;
 
 qlcnic_destroy_async_wq:
+	while (i--)
+		kfree(sriov->vf_info[i].vp);
 	destroy_workqueue(bc->bc_async_wq);
 
 qlcnic_destroy_trans_wq:
@@ -47,7 +47,8 @@ static void config_sub_second_increment(void __iomem *ioaddr,
 	if (!(value & PTP_TCR_TSCTRLSSR))
 		data = (data * 1000) / 465;
 
-	data &= PTP_SSIR_SSINC_MASK;
+	if (data > PTP_SSIR_SSINC_MAX)
+		data = PTP_SSIR_SSINC_MAX;
 
 	reg_value = data;
 	if (gmac4)
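The clamp matters because masking an out-of-range sub-second-increment value silently wraps it to a much smaller one, which would make the PTP clock period wildly wrong, whereas saturating at 0xff keeps the closest representable increment. A tiny standalone demonstration, with 0x118 as an assumed out-of-range input:

/* Mask vs clamp for an 8-bit register field; 0x118 is an assumed input. */
#include <stdio.h>

int main(void)
{
	unsigned int data = 0x118;			  /* needs 9 bits */
	unsigned int masked = data & 0xff;		  /* 0x18: wraps, far too small */
	unsigned int clamped = data > 0xff ? 0xff : data; /* 0xff: closest fit */

	printf("masked=0x%02x clamped=0x%02x\n", masked, clamped);
	return 0;
}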
@@ -7099,7 +7099,7 @@ int stmmac_dvr_probe(struct device *device,
 	priv->wq = create_singlethread_workqueue("stmmac_wq");
 	if (!priv->wq) {
 		dev_err(priv->device, "failed to create workqueue\n");
-		return -ENOMEM;
+		goto error_wq_init;
 	}
 
 	INIT_WORK(&priv->service_task, stmmac_service_task);
@@ -7327,6 +7327,7 @@ error_mdio_register:
 	stmmac_napi_del(ndev);
 error_hw_init:
 	destroy_workqueue(priv->wq);
+error_wq_init:
 	bitmap_free(priv->af_xdp_zc_qps);
 
 	return ret;
@@ -64,7 +64,7 @@
 #define PTP_TCR_TSENMACADDR	BIT(18)
 
 /* SSIR defines */
-#define PTP_SSIR_SSINC_MASK	0xff
+#define PTP_SSIR_SSINC_MAX	0xff
 #define GMAC4_PTP_SSIR_SSINC_SHIFT	16
 
 /* Auxiliary Control defines */
@@ -1654,12 +1654,16 @@ static int stmmac_test_arpoffload(struct stmmac_priv *priv)
 	}
 
 	ret = stmmac_set_arp_offload(priv, priv->hw, true, ip_addr);
-	if (ret)
+	if (ret) {
+		kfree_skb(skb);
 		goto cleanup;
+	}
 
 	ret = dev_set_promiscuity(priv->dev, 1);
-	if (ret)
+	if (ret) {
+		kfree_skb(skb);
 		goto cleanup;
+	}
 
 	ret = dev_direct_xmit(skb, 0);
 	if (ret)
@@ -536,7 +536,7 @@ static void xemaclite_tx_timeout(struct net_device *dev, unsigned int txqueue)
 	xemaclite_enable_interrupts(lp);
 
 	if (lp->deferred_skb) {
-		dev_kfree_skb(lp->deferred_skb);
+		dev_kfree_skb_irq(lp->deferred_skb);
 		lp->deferred_skb = NULL;
 		dev->stats.tx_errors++;
 	}
@@ -3831,10 +3831,24 @@ static int dfx_init(void)
 	int status;
 
 	status = pci_register_driver(&dfx_pci_driver);
-	if (!status)
-		status = eisa_driver_register(&dfx_eisa_driver);
-	if (!status)
-		status = tc_register_driver(&dfx_tc_driver);
+	if (status)
+		goto err_pci_register;
+
+	status = eisa_driver_register(&dfx_eisa_driver);
+	if (status)
+		goto err_eisa_register;
+
+	status = tc_register_driver(&dfx_tc_driver);
+	if (status)
+		goto err_tc_register;
+
+	return 0;
+
+err_tc_register:
+	eisa_driver_unregister(&dfx_eisa_driver);
+err_eisa_register:
+	pci_unregister_driver(&dfx_pci_driver);
+err_pci_register:
 	return status;
 }
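The defxx change replaces chained if (!status) registration, which returned an error without undoing earlier successful registrations, with the kernel's usual goto-unwind ladder: each failure jumps past its own cleanup and undoes exactly the steps that succeeded, in reverse order. The af_unix, dsa_tag_8021q and qlcnic hunks elsewhere in this merge apply the same shape. A self-contained sketch of the pattern; the three register/unregister pairs are stand-ins:

/* Illustrative error-unwind ladder; the subsystems are made up. */
#include <stdio.h>

static int register_a(void) { return 0; }
static int register_b(void) { return 0; }
static int register_c(void) { return -1; }	/* pretend this fails */
static void unregister_a(void) { puts("unregister a"); }
static void unregister_b(void) { puts("unregister b"); }

static int init_all(void)
{
	int status;

	status = register_a();
	if (status)
		goto err_a;

	status = register_b();
	if (status)
		goto err_b;

	status = register_c();
	if (status)
		goto err_c;

	return 0;

err_c:			/* undo in reverse order of setup */
	unregister_b();
err_b:
	unregister_a();
err_a:
	return status;
}

int main(void)
{
	return init_all() ? 1 : 0;
}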
@@ -302,12 +302,12 @@ static inline void scc_discard_buffers(struct scc_channel *scc)
 	spin_lock_irqsave(&scc->lock, flags);
 	if (scc->tx_buff != NULL)
 	{
-		dev_kfree_skb(scc->tx_buff);
+		dev_kfree_skb_irq(scc->tx_buff);
 		scc->tx_buff = NULL;
 	}
 
 	while (!skb_queue_empty(&scc->tx_queue))
-		dev_kfree_skb(skb_dequeue(&scc->tx_queue));
+		dev_kfree_skb_irq(skb_dequeue(&scc->tx_queue));
 
 	spin_unlock_irqrestore(&scc->lock, flags);
 }
@@ -1668,7 +1668,7 @@ static netdev_tx_t scc_net_tx(struct sk_buff *skb, struct net_device *dev)
 	if (skb_queue_len(&scc->tx_queue) > scc->dev->tx_queue_len) {
 		struct sk_buff *skb_del;
 		skb_del = skb_dequeue(&scc->tx_queue);
-		dev_kfree_skb(skb_del);
+		dev_kfree_skb_irq(skb_del);
 	}
 	skb_queue_tail(&scc->tx_queue, skb);
 	netif_trans_update(dev);
@@ -137,7 +137,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
 enqueue_again:
 	rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
 	if (rc) {
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 		ndev->stats.rx_errors++;
 		ndev->stats.rx_fifo_errors++;
 	}
@@ -192,7 +192,7 @@ static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
 		ndev->stats.tx_aborted_errors++;
 	}
 
-	dev_kfree_skb(skb);
+	dev_kfree_skb_any(skb);
 
 	if (ntb_transport_tx_free_entry(dev->qp) >= tx_start) {
 		/* Make sure anybody stopping the queue after this sees the new
@@ -2545,6 +2545,7 @@ fst_remove_one(struct pci_dev *pdev)
 		struct net_device *dev = port_to_dev(&card->ports[i]);
 
 		unregister_hdlc_device(dev);
+		free_netdev(dev);
 	}
 
 	fst_disable_intr(card);
@@ -2564,6 +2565,7 @@ fst_remove_one(struct pci_dev *pdev)
 				  card->tx_dma_handle_card);
 	}
 	fst_card_array[card->card_no] = NULL;
+	kfree(card);
 }
 
 static struct pci_driver fst_driver = {
@@ -97,8 +97,6 @@ struct mptcp_out_options {
 };
 
 #ifdef CONFIG_MPTCP
-extern struct request_sock_ops mptcp_subflow_request_sock_ops;
-
 void mptcp_init(void);
 
 static inline bool sk_is_mptcp(const struct sock *sk)
@@ -188,6 +186,9 @@ void mptcp_seq_show(struct seq_file *seq);
 int mptcp_subflow_init_cookie_req(struct request_sock *req,
 				  const struct sock *sk_listener,
 				  struct sk_buff *skb);
+struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops,
+					       struct sock *sk_listener,
+					       bool attach_listener);
 
 __be32 mptcp_get_reset_option(const struct sk_buff *skb);
@@ -274,6 +275,13 @@ static inline int mptcp_subflow_init_cookie_req(struct request_sock *req,
 	return 0; /* TCP fallback */
 }
 
+static inline struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops,
+							     struct sock *sk_listener,
+							     bool attach_listener)
+{
+	return NULL;
+}
+
 static inline __be32 mptcp_reset_option(const struct sk_buff *skb) { return htonl(0u); }
 #endif /* CONFIG_MPTCP */
@@ -415,6 +415,7 @@ static void dsa_tag_8021q_teardown(struct dsa_switch *ds)
 int dsa_tag_8021q_register(struct dsa_switch *ds, __be16 proto)
 {
 	struct dsa_8021q_context *ctx;
+	int err;
 
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
@@ -427,7 +428,15 @@ int dsa_tag_8021q_register(struct dsa_switch *ds, __be16 proto)
 
 	ds->tag_8021q_ctx = ctx;
 
-	return dsa_tag_8021q_setup(ds);
+	err = dsa_tag_8021q_setup(ds);
+	if (err)
+		goto err_free;
+
+	return 0;
+
+err_free:
+	kfree(ctx);
+	return err;
 }
 EXPORT_SYMBOL_GPL(dsa_tag_8021q_register);
@@ -288,12 +288,11 @@ struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
 	struct tcp_request_sock *treq;
 	struct request_sock *req;
 
-#ifdef CONFIG_MPTCP
 	if (sk_is_mptcp(sk))
-		ops = &mptcp_subflow_request_sock_ops;
-#endif
+		req = mptcp_subflow_reqsk_alloc(ops, sk, false);
+	else
+		req = inet_reqsk_alloc(ops, sk, false);
 
-	req = inet_reqsk_alloc(ops, sk, false);
 	if (!req)
 		return NULL;
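This is why the ifdef can go: when MPTCP is compiled out, sk_is_mptcp() is a constant-false inline stub, so the mptcp_subflow_reqsk_alloc() branch is dead code, and the inline NULL-returning stub added to include/net/mptcp.h above keeps the build happy. Routing the allocation through MPTCP also lets it substitute family-specific request_sock ops, which is the destructor fix in the subflow.c hunks below.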
@@ -176,6 +176,7 @@ EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb);
 void udp_tunnel_sock_release(struct socket *sock)
 {
 	rcu_assign_sk_user_data(sock->sk, NULL);
+	synchronize_rcu();
 	kernel_sock_shutdown(sock, SHUT_RDWR);
 	sock_release(sock);
 }
@@ -42,24 +42,29 @@ static void ip6_datagram_flow_key_init(struct flowi6 *fl6, struct sock *sk)
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct ipv6_pinfo *np = inet6_sk(sk);
+	int oif = sk->sk_bound_dev_if;
 
 	memset(fl6, 0, sizeof(*fl6));
 	fl6->flowi6_proto = sk->sk_protocol;
 	fl6->daddr = sk->sk_v6_daddr;
 	fl6->saddr = np->saddr;
-	fl6->flowi6_oif = sk->sk_bound_dev_if;
 	fl6->flowi6_mark = sk->sk_mark;
 	fl6->fl6_dport = inet->inet_dport;
 	fl6->fl6_sport = inet->inet_sport;
 	fl6->flowlabel = np->flow_label;
 	fl6->flowi6_uid = sk->sk_uid;
 
-	if (!fl6->flowi6_oif)
-		fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
+	if (!oif)
+		oif = np->sticky_pktinfo.ipi6_ifindex;
 
-	if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr))
-		fl6->flowi6_oif = np->mcast_oif;
+	if (!oif) {
+		if (ipv6_addr_is_multicast(&fl6->daddr))
+			oif = np->mcast_oif;
+		else
+			oif = np->ucast_oif;
+	}
 
+	fl6->flowi6_oif = oif;
 	security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));
 }
@@ -156,6 +156,7 @@ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info)
 
 	if (addr_val.addr.id == 0 || !(addr_val.flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) {
 		GENL_SET_ERR_MSG(info, "invalid addr id or flags");
+		err = -EINVAL;
 		goto announce_err;
 	}
@@ -282,6 +283,7 @@ int mptcp_nl_cmd_sf_create(struct sk_buff *skb, struct genl_info *info)
 
 	if (addr_l.id == 0) {
 		NL_SET_ERR_MSG_ATTR(info->extack, laddr, "missing local addr id");
+		err = -EINVAL;
 		goto create_err;
 	}
@@ -395,11 +397,13 @@ int mptcp_nl_cmd_sf_destroy(struct sk_buff *skb, struct genl_info *info)
 
 	if (addr_l.family != addr_r.family) {
 		GENL_SET_ERR_MSG(info, "address families do not match");
+		err = -EINVAL;
 		goto destroy_err;
 	}
 
 	if (!addr_l.port || !addr_r.port) {
 		GENL_SET_ERR_MSG(info, "missing local or remote port");
+		err = -EINVAL;
 		goto destroy_err;
 	}
@@ -45,7 +45,6 @@ static void subflow_req_destructor(struct request_sock *req)
 		sock_put((struct sock *)subflow_req->msk);
 
 	mptcp_token_destroy_request(req);
-	tcp_request_sock_ops.destructor(req);
 }
 
 static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
@@ -590,7 +589,7 @@ static int subflow_v6_rebuild_header(struct sock *sk)
 }
 #endif
 
-struct request_sock_ops mptcp_subflow_request_sock_ops;
+static struct request_sock_ops mptcp_subflow_v4_request_sock_ops __ro_after_init;
 static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops __ro_after_init;
 
 static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
@@ -603,7 +602,7 @@ static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
 		goto drop;
 
-	return tcp_conn_request(&mptcp_subflow_request_sock_ops,
+	return tcp_conn_request(&mptcp_subflow_v4_request_sock_ops,
 				&subflow_request_sock_ipv4_ops,
 				sk, skb);
 drop:
@@ -611,7 +610,14 @@ drop:
 	return 0;
 }
 
+static void subflow_v4_req_destructor(struct request_sock *req)
+{
+	subflow_req_destructor(req);
+	tcp_request_sock_ops.destructor(req);
+}
+
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
+static struct request_sock_ops mptcp_subflow_v6_request_sock_ops __ro_after_init;
 static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init;
 static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init;
 static struct inet_connection_sock_af_ops subflow_v6m_specific __ro_after_init;
@@ -634,15 +640,36 @@ static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 		return 0;
 	}
 
-	return tcp_conn_request(&mptcp_subflow_request_sock_ops,
+	return tcp_conn_request(&mptcp_subflow_v6_request_sock_ops,
 				&subflow_request_sock_ipv6_ops, sk, skb);
 
 drop:
 	tcp_listendrop(sk);
 	return 0; /* don't send reset */
 }
 
+static void subflow_v6_req_destructor(struct request_sock *req)
+{
+	subflow_req_destructor(req);
+	tcp6_request_sock_ops.destructor(req);
+}
 #endif
 
+struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops,
+					       struct sock *sk_listener,
+					       bool attach_listener)
+{
+	if (ops->family == AF_INET)
+		ops = &mptcp_subflow_v4_request_sock_ops;
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+	else if (ops->family == AF_INET6)
+		ops = &mptcp_subflow_v6_request_sock_ops;
+#endif
+
+	return inet_reqsk_alloc(ops, sk_listener, attach_listener);
+}
+EXPORT_SYMBOL(mptcp_subflow_reqsk_alloc);
+
 /* validate hmac received in third ACK */
 static bool subflow_hmac_valid(const struct request_sock *req,
 			       const struct mptcp_options_received *mp_opt)
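Per the conflict note in the merge message, these subflow.c hunks are the heart of d3295fee3c ("mptcp: use proper req destructor for IPv6"): the single shared mptcp_subflow_request_sock_ops always chained to tcp_request_sock_ops.destructor, which is the wrong destructor for request socks created from an IPv6 listener. Splitting into v4 and v6 ops, each calling the matching TCP destructor after the common subflow teardown, fixes that, and mptcp_subflow_reqsk_alloc() selects the right ops by ops->family.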
@@ -1963,7 +1990,6 @@ static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
 static int subflow_ops_init(struct request_sock_ops *subflow_ops)
 {
 	subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);
-	subflow_ops->slab_name = "request_sock_subflow";
 
 	subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
 					      subflow_ops->obj_size, 0,
@@ -1973,16 +1999,17 @@ static int subflow_ops_init(struct request_sock_ops *subflow_ops)
 	if (!subflow_ops->slab)
 		return -ENOMEM;
 
-	subflow_ops->destructor = subflow_req_destructor;
-
 	return 0;
 }
 
 void __init mptcp_subflow_init(void)
 {
-	mptcp_subflow_request_sock_ops = tcp_request_sock_ops;
-	if (subflow_ops_init(&mptcp_subflow_request_sock_ops) != 0)
-		panic("MPTCP: failed to init subflow request sock ops\n");
+	mptcp_subflow_v4_request_sock_ops = tcp_request_sock_ops;
+	mptcp_subflow_v4_request_sock_ops.slab_name = "request_sock_subflow_v4";
+	mptcp_subflow_v4_request_sock_ops.destructor = subflow_v4_req_destructor;
+
+	if (subflow_ops_init(&mptcp_subflow_v4_request_sock_ops) != 0)
+		panic("MPTCP: failed to init subflow v4 request sock ops\n");
 
 	subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
 	subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req;
|
||||
tcp_prot_override.release_cb = tcp_release_cb_override;
|
||||
|
||||
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
|
||||
/* In struct mptcp_subflow_request_sock, we assume the TCP request sock
|
||||
* structures for v4 and v6 have the same size. It should not changed in
|
||||
* the future but better to make sure to be warned if it is no longer
|
||||
* the case.
|
||||
*/
|
||||
BUILD_BUG_ON(sizeof(struct tcp_request_sock) != sizeof(struct tcp6_request_sock));
|
||||
|
||||
mptcp_subflow_v6_request_sock_ops = tcp6_request_sock_ops;
|
||||
mptcp_subflow_v6_request_sock_ops.slab_name = "request_sock_subflow_v6";
|
||||
mptcp_subflow_v6_request_sock_ops.destructor = subflow_v6_req_destructor;
|
||||
|
||||
if (subflow_ops_init(&mptcp_subflow_v6_request_sock_ops) != 0)
|
||||
panic("MPTCP: failed to init subflow v6 request sock ops\n");
|
||||
|
||||
subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
|
||||
subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req;
|
||||
subflow_request_sock_ipv6_ops.send_synack = subflow_v6_send_synack;
|
||||
|
@@ -3738,6 +3738,7 @@ static int __init af_unix_init(void)
 	rc = proto_register(&unix_stream_proto, 1);
 	if (rc != 0) {
 		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
+		proto_unregister(&unix_dgram_proto);
 		goto out;
 	}
@@ -1711,7 +1711,11 @@ static int vmci_transport_dgram_enqueue(
 	if (!dg)
 		return -ENOMEM;
 
-	memcpy_from_msg(VMCI_DG_PAYLOAD(dg), msg, len);
+	err = memcpy_from_msg(VMCI_DG_PAYLOAD(dg), msg, len);
+	if (err) {
+		kfree(dg);
+		return err;
+	}
 
 	dg->dst = vmci_make_handle(remote_addr->svm_cid,
 				   remote_addr->svm_port);
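memcpy_from_msg() can fail when the user buffer faults; checking its return value and freeing the datagram here prevents sending a payload that was only partially copied from userspace.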