commit d904c8cc03
Merge tag 'net-5.18-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Paolo Abeni:
 "Including fixes from can, xfrm and netfilter subtrees. Notably this
  reverts a recent TCP/DCCP netns-related change to address a possible
  use-after-free (UaF).

  Current release - regressions:

   - tcp: revert "tcp/dccp: get rid of inet_twsk_purge()"

   - xfrm: set dst dev to blackhole_netdev instead of loopback_dev in
     ifdown

  Previous releases - regressions:

   - netfilter: flowtable: fix TCP flow teardown

   - can: revert "can: m_can: pci: use custom bit timings for Elkhart
     Lake"

   - xfrm: check encryption module availability consistency

   - eth: vmxnet3: fix possible use-after-free bugs in
     vmxnet3_rq_alloc_rx_buf()

   - eth: mlx5: initialize flow steering during driver probe

   - eth: ice: fix crash when writing timestamp on RX rings

  Previous releases - always broken:

   - mptcp: fix checksum byte order

   - eth: lan966x: fix assignment of the MAC address

   - eth: mlx5: remove HW-GRO from reported features

   - eth: ftgmac100: disable hardware checksum on AST2600"

Signed-off-by: Paolo Abeni <pabeni@redhat.com>

* tag 'net-5.18-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (50 commits)
  net: bridge: Clear offload_fwd_mark when passing frame up bridge interface.
  ptp: ocp: change sysfs attr group handling
  selftests: forwarding: fix missing backslash
  netfilter: nf_tables: disable expression reduction infra
  netfilter: flowtable: move dst_check to packet path
  netfilter: flowtable: fix TCP flow teardown
  net: ftgmac100: Disable hardware checksum on AST2600
  igb: skip phy status check where unavailable
  nfc: pn533: Fix buggy cleanup order
  mptcp: Do TCP fallback on early DSS checksum failure
  mptcp: fix checksum byte order
  net: af_key: check encryption module availability consistency
  net: af_key: add check for pfkey_broadcast in function pfkey_process
  net/mlx5: Drain fw_reset when removing device
  net/mlx5e: CT: Fix setting flow_source for smfs ct tuples
  net/mlx5e: CT: Fix support for GRE tuples
  net/mlx5e: Remove HW-GRO from reported features
  net/mlx5e: Properly block HW GRO when XDP is enabled
  net/mlx5e: Properly block LRO when XDP is enabled
  net/mlx5e: Block rx-gro-hw feature in switchdev mode
  ...

@@ -1495,34 +1495,22 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
 		err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
 		if (err)
 			return err;
-		cdev->can.bittiming_const = cdev->bit_timing ?
-			cdev->bit_timing : &m_can_bittiming_const_30X;
-
-		cdev->can.data_bittiming_const = cdev->data_timing ?
-			cdev->data_timing :
-			&m_can_data_bittiming_const_30X;
+		cdev->can.bittiming_const = &m_can_bittiming_const_30X;
+		cdev->can.data_bittiming_const = &m_can_data_bittiming_const_30X;
 		break;
 	case 31:
 		/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
 		err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
 		if (err)
 			return err;
-		cdev->can.bittiming_const = cdev->bit_timing ?
-			cdev->bit_timing : &m_can_bittiming_const_31X;
-
-		cdev->can.data_bittiming_const = cdev->data_timing ?
-			cdev->data_timing :
-			&m_can_data_bittiming_const_31X;
+		cdev->can.bittiming_const = &m_can_bittiming_const_31X;
+		cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
 		break;
 	case 32:
 	case 33:
 		/* Support both MCAN version v3.2.x and v3.3.0 */
-		cdev->can.bittiming_const = cdev->bit_timing ?
-			cdev->bit_timing : &m_can_bittiming_const_31X;
-
-		cdev->can.data_bittiming_const = cdev->data_timing ?
-			cdev->data_timing :
-			&m_can_data_bittiming_const_31X;
+		cdev->can.bittiming_const = &m_can_bittiming_const_31X;
+		cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
 
 		cdev->can.ctrlmode_supported |=
 			(m_can_niso_supported(cdev) ?
@@ -85,9 +85,6 @@ struct m_can_classdev {
 	struct sk_buff *tx_skb;
 	struct phy *transceiver;
 
-	const struct can_bittiming_const *bit_timing;
-	const struct can_bittiming_const *data_timing;
-
 	struct m_can_ops *ops;
 
 	int version;
@@ -18,14 +18,9 @@
 
 #define M_CAN_PCI_MMIO_BAR		0
 
+#define M_CAN_CLOCK_FREQ_EHL		200000000
 #define CTL_CSR_INT_CTL_OFFSET		0x508
 
-struct m_can_pci_config {
-	const struct can_bittiming_const *bit_timing;
-	const struct can_bittiming_const *data_timing;
-	unsigned int clock_freq;
-};
-
 struct m_can_pci_priv {
 	struct m_can_classdev cdev;
 
@@ -89,40 +84,9 @@ static struct m_can_ops m_can_pci_ops = {
 	.read_fifo = iomap_read_fifo,
 };
 
-static const struct can_bittiming_const m_can_bittiming_const_ehl = {
-	.name = KBUILD_MODNAME,
-	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
-	.tseg1_max = 64,
-	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
-	.tseg2_max = 128,
-	.sjw_max = 128,
-	.brp_min = 1,
-	.brp_max = 512,
-	.brp_inc = 1,
-};
-
-static const struct can_bittiming_const m_can_data_bittiming_const_ehl = {
-	.name = KBUILD_MODNAME,
-	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
-	.tseg1_max = 16,
-	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
-	.tseg2_max = 8,
-	.sjw_max = 4,
-	.brp_min = 1,
-	.brp_max = 32,
-	.brp_inc = 1,
-};
-
-static const struct m_can_pci_config m_can_pci_ehl = {
-	.bit_timing = &m_can_bittiming_const_ehl,
-	.data_timing = &m_can_data_bittiming_const_ehl,
-	.clock_freq = 200000000,
-};
-
 static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
 {
 	struct device *dev = &pci->dev;
-	const struct m_can_pci_config *cfg;
 	struct m_can_classdev *mcan_class;
 	struct m_can_pci_priv *priv;
 	void __iomem *base;
@@ -150,8 +114,6 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
 	if (!mcan_class)
 		return -ENOMEM;
 
-	cfg = (const struct m_can_pci_config *)id->driver_data;
-
 	priv = cdev_to_priv(mcan_class);
 
 	priv->base = base;
@@ -163,9 +125,7 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
 	mcan_class->dev = &pci->dev;
 	mcan_class->net->irq = pci_irq_vector(pci, 0);
 	mcan_class->pm_clock_support = 1;
-	mcan_class->bit_timing = cfg->bit_timing;
-	mcan_class->data_timing = cfg->data_timing;
-	mcan_class->can.clock.freq = cfg->clock_freq;
+	mcan_class->can.clock.freq = id->driver_data;
 	mcan_class->ops = &m_can_pci_ops;
 
 	pci_set_drvdata(pci, mcan_class);
@@ -218,8 +178,8 @@ static SIMPLE_DEV_PM_OPS(m_can_pci_pm_ops,
 			 m_can_pci_suspend, m_can_pci_resume);
 
 static const struct pci_device_id m_can_pci_id_table[] = {
-	{ PCI_VDEVICE(INTEL, 0x4bc1), (kernel_ulong_t)&m_can_pci_ehl, },
-	{ PCI_VDEVICE(INTEL, 0x4bc2), (kernel_ulong_t)&m_can_pci_ehl, },
+	{ PCI_VDEVICE(INTEL, 0x4bc1), M_CAN_CLOCK_FREQ_EHL, },
+	{ PCI_VDEVICE(INTEL, 0x4bc2), M_CAN_CLOCK_FREQ_EHL, },
 	{ } /* Terminating Entry */
 };
 MODULE_DEVICE_TABLE(pci, m_can_pci_id_table);
@@ -2585,8 +2585,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
 	device_set_wakeup_capable(&pdev->dev, 1);
 
 	priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol");
-	if (IS_ERR(priv->wol_clk))
-		return PTR_ERR(priv->wol_clk);
+	if (IS_ERR(priv->wol_clk)) {
+		ret = PTR_ERR(priv->wol_clk);
+		goto err_deregister_fixed_link;
+	}
 
 	/* Set the needed headroom once and for all */
 	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
@@ -1219,7 +1219,6 @@ static void gem_rx_refill(struct macb_queue *queue)
 		/* Make hw descriptor updates visible to CPU */
 		rmb();
 
-		queue->rx_prepared_head++;
 		desc = macb_rx_desc(queue, entry);
 
 		if (!queue->rx_skbuff[entry]) {
@@ -1258,6 +1257,7 @@ static void gem_rx_refill(struct macb_queue *queue)
 			dma_wmb();
 			desc->addr &= ~MACB_BIT(RX_USED);
 		}
+		queue->rx_prepared_head++;
 	}
 
 	/* Make descriptor updates visible to hardware */
@@ -1928,6 +1928,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
 	/* AST2400 doesn't have working HW checksum generation */
 	if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac")))
 		netdev->hw_features &= ~NETIF_F_HW_CSUM;
+
+	/* AST2600 tx checksum with NCSI is broken */
+	if (priv->use_ncsi && of_device_is_compatible(np, "aspeed,ast2600-mac"))
+		netdev->hw_features &= ~NETIF_F_HW_CSUM;
+
 	if (np && of_get_property(np, "no-hw-checksum", NULL))
 		netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM);
 	netdev->features |= netdev->hw_features;
@@ -3043,8 +3043,8 @@ ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
 	ice_for_each_q_vector(vsi, i) {
 		struct ice_q_vector *q_vector = vsi->q_vectors[i];
 
-		coalesce[i].itr_tx = q_vector->tx.itr_setting;
-		coalesce[i].itr_rx = q_vector->rx.itr_setting;
+		coalesce[i].itr_tx = q_vector->tx.itr_settings;
+		coalesce[i].itr_rx = q_vector->rx.itr_settings;
 		coalesce[i].intrl = q_vector->intrl;
 
 		if (i < vsi->num_txq)
@@ -3100,21 +3100,21 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
 		 */
 		if (i < vsi->alloc_rxq && coalesce[i].rx_valid) {
 			rc = &vsi->q_vectors[i]->rx;
-			rc->itr_setting = coalesce[i].itr_rx;
+			rc->itr_settings = coalesce[i].itr_rx;
 			ice_write_itr(rc, rc->itr_setting);
 		} else if (i < vsi->alloc_rxq) {
 			rc = &vsi->q_vectors[i]->rx;
-			rc->itr_setting = coalesce[0].itr_rx;
+			rc->itr_settings = coalesce[0].itr_rx;
 			ice_write_itr(rc, rc->itr_setting);
 		}
 
 		if (i < vsi->alloc_txq && coalesce[i].tx_valid) {
 			rc = &vsi->q_vectors[i]->tx;
-			rc->itr_setting = coalesce[i].itr_tx;
+			rc->itr_settings = coalesce[i].itr_tx;
 			ice_write_itr(rc, rc->itr_setting);
 		} else if (i < vsi->alloc_txq) {
 			rc = &vsi->q_vectors[i]->tx;
-			rc->itr_setting = coalesce[0].itr_tx;
+			rc->itr_settings = coalesce[0].itr_tx;
 			ice_write_itr(rc, rc->itr_setting);
 		}
 
@@ -3128,12 +3128,12 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
 	for (; i < vsi->num_q_vectors; i++) {
 		/* transmit */
 		rc = &vsi->q_vectors[i]->tx;
-		rc->itr_setting = coalesce[0].itr_tx;
+		rc->itr_settings = coalesce[0].itr_tx;
 		ice_write_itr(rc, rc->itr_setting);
 
 		/* receive */
 		rc = &vsi->q_vectors[i]->rx;
-		rc->itr_setting = coalesce[0].itr_rx;
+		rc->itr_settings = coalesce[0].itr_rx;
 		ice_write_itr(rc, rc->itr_setting);
 
 		vsi->q_vectors[i]->intrl = coalesce[0].intrl;
@@ -6172,9 +6172,10 @@ static int ice_up_complete(struct ice_vsi *vsi)
 		ice_ptp_link_change(pf, pf->hw.pf_id, true);
 	}
 
-	/* clear this now, and the first stats read will be used as baseline */
-	vsi->stat_offsets_loaded = false;
-
+	/* Perform an initial read of the statistics registers now to
+	 * set the baseline so counters are ready when interface is up
+	 */
+	ice_update_eth_stats(vsi);
 	ice_service_task_schedule(pf);
 
 	return 0;
@@ -500,12 +500,19 @@ ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
  * This function must be called periodically to ensure that the cached value
  * is never more than 2 seconds old. It must also be called whenever the PHC
  * time has been changed.
+ *
+ * Return:
+ * * 0 - OK, successfully updated
+ * * -EAGAIN - PF was busy, need to reschedule the update
  */
-static void ice_ptp_update_cached_phctime(struct ice_pf *pf)
+static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
 {
 	u64 systime;
 	int i;
 
+	if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
+		return -EAGAIN;
+
 	/* Read the current PHC time */
 	systime = ice_ptp_read_src_clk_reg(pf, NULL);
 
@@ -528,6 +535,9 @@ static void ice_ptp_update_cached_phctime(struct ice_pf *pf)
 			WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
 		}
 	}
+	clear_bit(ICE_CFG_BUSY, pf->state);
+
+	return 0;
 }
 
 /**
@@ -2330,17 +2340,18 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
 {
 	struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
 	struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
+	int err;
 
 	if (!test_bit(ICE_FLAG_PTP, pf->flags))
 		return;
 
-	ice_ptp_update_cached_phctime(pf);
+	err = ice_ptp_update_cached_phctime(pf);
 
 	ice_ptp_tx_tstamp_cleanup(&pf->hw, &pf->ptp.port.tx);
 
-	/* Run twice a second */
+	/* Run twice a second or reschedule if phc update failed */
 	kthread_queue_delayed_work(ptp->kworker, &ptp->work,
-				   msecs_to_jiffies(500));
+				   msecs_to_jiffies(err ? 10 : 500));
 }
 
 /**
@@ -384,9 +384,14 @@ struct ice_ring_container {
 	/* this matches the maximum number of ITR bits, but in usec
	 * values, so it is shifted left one bit (bit zero is ignored)
	 */
-	u16 itr_setting:13;
-	u16 itr_reserved:2;
-	u16 itr_mode:1;
+	union {
+		struct {
+			u16 itr_setting:13;
+			u16 itr_reserved:2;
+			u16 itr_mode:1;
+		};
+		u16 itr_settings;
+	};
 	enum ice_container_type type;
 };
 
@@ -5505,7 +5505,8 @@ static void igb_watchdog_task(struct work_struct *work)
 			break;
 		}
 
-		if (adapter->link_speed != SPEED_1000)
+		if (adapter->link_speed != SPEED_1000 ||
+		    !hw->phy.ops.read_reg)
 			goto no_wait;
 
 		/* wait for Remote receiver status OK */
@@ -23,7 +23,7 @@ struct mlx5_ct_fs_smfs_matcher {
 };
 
 struct mlx5_ct_fs_smfs_matchers {
-	struct mlx5_ct_fs_smfs_matcher smfs_matchers[4];
+	struct mlx5_ct_fs_smfs_matcher smfs_matchers[6];
 	struct list_head used;
 };
 
@@ -44,7 +44,8 @@ struct mlx5_ct_fs_smfs_rule {
 };
 
 static inline void
-mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bool ipv4, bool tcp)
+mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bool ipv4, bool tcp,
+			  bool gre)
 {
 	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
 
@@ -77,7 +78,7 @@ mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bo
 		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, tcp_dport);
 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
 			 ntohs(MLX5_CT_TCP_FLAGS_MASK));
-	} else {
+	} else if (!gre) {
 		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_sport);
 		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_dport);
 	}
@@ -87,7 +88,7 @@ mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bo
 
 static struct mlx5dr_matcher *
 mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl, bool ipv4,
-			       bool tcp, u32 priority)
+			       bool tcp, bool gre, u32 priority)
 {
 	struct mlx5dr_matcher *dr_matcher;
 	struct mlx5_flow_spec *spec;
@@ -96,7 +97,7 @@ mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl,
 	if (!spec)
 		return ERR_PTR(-ENOMEM);
 
-	mlx5_ct_fs_smfs_fill_mask(fs, spec, ipv4, tcp);
+	mlx5_ct_fs_smfs_fill_mask(fs, spec, ipv4, tcp, gre);
 	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2 | MLX5_MATCH_OUTER_HEADERS;
 
 	dr_matcher = mlx5_smfs_matcher_create(tbl, priority, spec);
@@ -108,7 +109,7 @@ mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl,
 }
 
 static struct mlx5_ct_fs_smfs_matcher *
-mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp)
+mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp, bool gre)
 {
 	struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
 	struct mlx5_ct_fs_smfs_matcher *m, *smfs_matcher;
@@ -119,7 +120,7 @@ mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp
 	int prio;
 
 	matchers = nat ? &fs_smfs->matchers_nat : &fs_smfs->matchers;
-	smfs_matcher = &matchers->smfs_matchers[ipv4 * 2 + tcp];
+	smfs_matcher = &matchers->smfs_matchers[ipv4 * 3 + tcp * 2 + gre];
 
 	if (refcount_inc_not_zero(&smfs_matcher->ref))
 		return smfs_matcher;
@@ -145,11 +146,11 @@ mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp
 	}
 
 	tbl = nat ? fs_smfs->ct_nat_tbl : fs_smfs->ct_tbl;
-	dr_matcher = mlx5_ct_fs_smfs_matcher_create(fs, tbl, ipv4, tcp, prio);
+	dr_matcher = mlx5_ct_fs_smfs_matcher_create(fs, tbl, ipv4, tcp, gre, prio);
 	if (IS_ERR(dr_matcher)) {
 		netdev_warn(fs->netdev,
-			    "ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d), err: %ld\n",
-			    nat, ipv4, tcp, PTR_ERR(dr_matcher));
+			    "ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d, gre %d), err: %ld\n",
+			    nat, ipv4, tcp, gre, PTR_ERR(dr_matcher));
 
 		smfs_matcher = ERR_CAST(dr_matcher);
 		goto out_unlock;
@@ -222,16 +223,17 @@ mlx5_ct_fs_smfs_destroy(struct mlx5_ct_fs *fs)
 static inline bool
 mlx5_tc_ct_valid_used_dissector_keys(const u32 used_keys)
 {
-#define DISSECTOR_BIT(name) BIT(FLOW_DISSECTOR_KEY_ ## name)
-	const u32 basic_keys = DISSECTOR_BIT(BASIC) | DISSECTOR_BIT(CONTROL) |
-			       DISSECTOR_BIT(PORTS) | DISSECTOR_BIT(META);
-	const u32 ipv4_tcp = basic_keys | DISSECTOR_BIT(IPV4_ADDRS) | DISSECTOR_BIT(TCP);
-	const u32 ipv4_udp = basic_keys | DISSECTOR_BIT(IPV4_ADDRS);
-	const u32 ipv6_tcp = basic_keys | DISSECTOR_BIT(IPV6_ADDRS) | DISSECTOR_BIT(TCP);
-	const u32 ipv6_udp = basic_keys | DISSECTOR_BIT(IPV6_ADDRS);
+#define DISS_BIT(name) BIT(FLOW_DISSECTOR_KEY_ ## name)
+	const u32 basic_keys = DISS_BIT(BASIC) | DISS_BIT(CONTROL) | DISS_BIT(META);
+	const u32 ipv4_tcp = basic_keys | DISS_BIT(IPV4_ADDRS) | DISS_BIT(PORTS) | DISS_BIT(TCP);
+	const u32 ipv6_tcp = basic_keys | DISS_BIT(IPV6_ADDRS) | DISS_BIT(PORTS) | DISS_BIT(TCP);
+	const u32 ipv4_udp = basic_keys | DISS_BIT(IPV4_ADDRS) | DISS_BIT(PORTS);
+	const u32 ipv6_udp = basic_keys | DISS_BIT(IPV6_ADDRS) | DISS_BIT(PORTS);
+	const u32 ipv4_gre = basic_keys | DISS_BIT(IPV4_ADDRS);
+	const u32 ipv6_gre = basic_keys | DISS_BIT(IPV6_ADDRS);
 
 	return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp ||
-		used_keys == ipv6_udp);
+		used_keys == ipv6_udp || used_keys == ipv4_gre || used_keys == ipv6_gre);
 }
 
 static bool
@@ -254,20 +256,24 @@ mlx5_ct_fs_smfs_ct_validate_flow_rule(struct mlx5_ct_fs *fs, struct flow_rule *f
 	flow_rule_match_control(flow_rule, &control);
 	flow_rule_match_ipv4_addrs(flow_rule, &ipv4_addrs);
 	flow_rule_match_ipv6_addrs(flow_rule, &ipv6_addrs);
-	flow_rule_match_ports(flow_rule, &ports);
-	flow_rule_match_tcp(flow_rule, &tcp);
+	if (basic.key->ip_proto != IPPROTO_GRE)
+		flow_rule_match_ports(flow_rule, &ports);
+	if (basic.key->ip_proto == IPPROTO_TCP)
+		flow_rule_match_tcp(flow_rule, &tcp);
 
 	if (basic.mask->n_proto != htons(0xFFFF) ||
 	    (basic.key->n_proto != htons(ETH_P_IP) && basic.key->n_proto != htons(ETH_P_IPV6)) ||
 	    basic.mask->ip_proto != 0xFF ||
-	    (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP)) {
+	    (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP &&
+	     basic.key->ip_proto != IPPROTO_GRE)) {
 		ct_dbg("rule uses unexpected basic match (n_proto 0x%04x/0x%04x, ip_proto 0x%02x/0x%02x)",
 		       ntohs(basic.key->n_proto), ntohs(basic.mask->n_proto),
 		       basic.key->ip_proto, basic.mask->ip_proto);
 		return false;
 	}
 
-	if (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF)) {
+	if (basic.key->ip_proto != IPPROTO_GRE &&
+	    (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF))) {
 		ct_dbg("rule uses ports match (src 0x%04x, dst 0x%04x)",
 		       ports.mask->src, ports.mask->dst);
 		return false;
@@ -291,7 +297,7 @@ mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
 	struct mlx5dr_action *actions[5];
 	struct mlx5dr_rule *rule;
 	int num_actions = 0, err;
-	bool nat, tcp, ipv4;
+	bool nat, tcp, ipv4, gre;
 
 	if (!mlx5_ct_fs_smfs_ct_validate_flow_rule(fs, flow_rule))
 		return ERR_PTR(-EOPNOTSUPP);
@@ -314,15 +320,17 @@ mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
 	ipv4 = mlx5e_tc_get_ip_version(spec, true) == 4;
 	tcp = MLX5_GET(fte_match_param, spec->match_value,
 		       outer_headers.ip_protocol) == IPPROTO_TCP;
+	gre = MLX5_GET(fte_match_param, spec->match_value,
+		       outer_headers.ip_protocol) == IPPROTO_GRE;
 
-	smfs_matcher = mlx5_ct_fs_smfs_matcher_get(fs, nat, ipv4, tcp);
+	smfs_matcher = mlx5_ct_fs_smfs_matcher_get(fs, nat, ipv4, tcp, gre);
 	if (IS_ERR(smfs_matcher)) {
 		err = PTR_ERR(smfs_matcher);
 		goto err_matcher;
 	}
 
 	rule = mlx5_smfs_rule_create(smfs_matcher->dr_matcher, spec, num_actions, actions,
-				     MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT);
+				     spec->flow_context.flow_source);
 	if (!rule) {
 		err = -EINVAL;
 		goto err_create;
@@ -14,19 +14,26 @@ static int mlx5e_trap_napi_poll(struct napi_struct *napi, int budget)
 	bool busy = false;
 	int work_done = 0;
 
+	rcu_read_lock();
+
 	ch_stats->poll++;
 
 	work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
 	busy |= work_done == budget;
 	busy |= rq->post_wqes(rq);
 
-	if (busy)
-		return budget;
+	if (busy) {
+		work_done = budget;
+		goto out;
+	}
 
 	if (unlikely(!napi_complete_done(napi, work_done)))
-		return work_done;
+		goto out;
 
 	mlx5e_cq_arm(&rq->cq);
 
+out:
+	rcu_read_unlock();
 	return work_done;
 }
@@ -3864,6 +3864,10 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev
 	if (netdev->features & NETIF_F_NTUPLE)
 		netdev_warn(netdev, "Disabling ntuple, not supported in switchdev mode\n");
 
+	features &= ~NETIF_F_GRO_HW;
+	if (netdev->features & NETIF_F_GRO_HW)
+		netdev_warn(netdev, "Disabling HW_GRO, not supported in switchdev mode\n");
+
 	return features;
 }
 
@@ -3896,6 +3900,25 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
 		}
 	}
 
+	if (params->xdp_prog) {
+		if (features & NETIF_F_LRO) {
+			netdev_warn(netdev, "LRO is incompatible with XDP\n");
+			features &= ~NETIF_F_LRO;
+		}
+		if (features & NETIF_F_GRO_HW) {
+			netdev_warn(netdev, "HW GRO is incompatible with XDP\n");
+			features &= ~NETIF_F_GRO_HW;
+		}
+	}
+
+	if (priv->xsk.refcnt) {
+		if (features & NETIF_F_GRO_HW) {
+			netdev_warn(netdev, "HW GRO is incompatible with AF_XDP (%u XSKs are active)\n",
+				    priv->xsk.refcnt);
+			features &= ~NETIF_F_GRO_HW;
+		}
+	}
+
 	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
 		features &= ~NETIF_F_RXHASH;
 		if (netdev->features & NETIF_F_RXHASH)
@@ -4850,10 +4873,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 	netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
 
-	if (!!MLX5_CAP_GEN(mdev, shampo) &&
-	    mlx5e_check_fragmented_striding_rq_cap(mdev))
-		netdev->hw_features |= NETIF_F_GRO_HW;
-
 	if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
 		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
 		netdev->hw_enc_features |= NETIF_F_TSO;
@@ -2663,28 +2663,6 @@ static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
 	clean_tree(&root_ns->ns.node);
 }
 
-void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
-{
-	struct mlx5_flow_steering *steering = dev->priv.steering;
-
-	cleanup_root_ns(steering->root_ns);
-	cleanup_root_ns(steering->fdb_root_ns);
-	steering->fdb_root_ns = NULL;
-	kfree(steering->fdb_sub_ns);
-	steering->fdb_sub_ns = NULL;
-	cleanup_root_ns(steering->port_sel_root_ns);
-	cleanup_root_ns(steering->sniffer_rx_root_ns);
-	cleanup_root_ns(steering->sniffer_tx_root_ns);
-	cleanup_root_ns(steering->rdma_rx_root_ns);
-	cleanup_root_ns(steering->rdma_tx_root_ns);
-	cleanup_root_ns(steering->egress_root_ns);
-	mlx5_cleanup_fc_stats(dev);
-	kmem_cache_destroy(steering->ftes_cache);
-	kmem_cache_destroy(steering->fgs_cache);
-	mlx5_ft_pool_destroy(dev);
-	kfree(steering);
-}
-
 static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
 {
 	struct fs_prio *prio;
@@ -3086,43 +3064,28 @@ cleanup:
 	return err;
 }
 
-int mlx5_init_fs(struct mlx5_core_dev *dev)
+void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
 {
-	struct mlx5_flow_steering *steering;
+	struct mlx5_flow_steering *steering = dev->priv.steering;
+
+	cleanup_root_ns(steering->root_ns);
+	cleanup_root_ns(steering->fdb_root_ns);
+	steering->fdb_root_ns = NULL;
+	kfree(steering->fdb_sub_ns);
+	steering->fdb_sub_ns = NULL;
+	cleanup_root_ns(steering->port_sel_root_ns);
+	cleanup_root_ns(steering->sniffer_rx_root_ns);
+	cleanup_root_ns(steering->sniffer_tx_root_ns);
+	cleanup_root_ns(steering->rdma_rx_root_ns);
+	cleanup_root_ns(steering->rdma_tx_root_ns);
+	cleanup_root_ns(steering->egress_root_ns);
+}
+
+int mlx5_fs_core_init(struct mlx5_core_dev *dev)
+{
+	struct mlx5_flow_steering *steering = dev->priv.steering;
 	int err = 0;
 
-	err = mlx5_init_fc_stats(dev);
-	if (err)
-		return err;
-
-	err = mlx5_ft_pool_init(dev);
-	if (err)
-		return err;
-
-	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
-	if (!steering) {
-		err = -ENOMEM;
-		goto err;
-	}
-
-	steering->dev = dev;
-	dev->priv.steering = steering;
-
-	if (mlx5_fs_dr_is_supported(dev))
-		steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
-	else
-		steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
-
-	steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
-						sizeof(struct mlx5_flow_group), 0,
-						0, NULL);
-	steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
-						 0, NULL);
-	if (!steering->ftes_cache || !steering->fgs_cache) {
-		err = -ENOMEM;
-		goto err;
-	}
-
 	if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
 	      (MLX5_CAP_GEN(dev, nic_flow_table))) ||
 	     ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
@@ -3180,8 +3143,64 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
 	}
 
 	return 0;
 
 err:
-	mlx5_cleanup_fs(dev);
+	mlx5_fs_core_cleanup(dev);
+	return err;
+}
+
+void mlx5_fs_core_free(struct mlx5_core_dev *dev)
+{
+	struct mlx5_flow_steering *steering = dev->priv.steering;
+
+	kmem_cache_destroy(steering->ftes_cache);
+	kmem_cache_destroy(steering->fgs_cache);
+	kfree(steering);
+	mlx5_ft_pool_destroy(dev);
+	mlx5_cleanup_fc_stats(dev);
+}
+
+int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
+{
+	struct mlx5_flow_steering *steering;
+	int err = 0;
+
+	err = mlx5_init_fc_stats(dev);
+	if (err)
+		return err;
+
+	err = mlx5_ft_pool_init(dev);
+	if (err)
+		goto err;
+
+	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
+	if (!steering) {
+		err = -ENOMEM;
+		goto err;
+	}
+
+	steering->dev = dev;
+	dev->priv.steering = steering;
+
+	if (mlx5_fs_dr_is_supported(dev))
+		steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
+	else
+		steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
+
+	steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
+						sizeof(struct mlx5_flow_group), 0,
+						0, NULL);
+	steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
+						 0, NULL);
+	if (!steering->ftes_cache || !steering->fgs_cache) {
+		err = -ENOMEM;
+		goto err;
+	}
+
+	return 0;
+
+err:
+	mlx5_fs_core_free(dev);
	return err;
 }
@@ -298,8 +298,10 @@ int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
 int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
 				 enum mlx5_flow_steering_mode mode);
 
-int mlx5_init_fs(struct mlx5_core_dev *dev);
-void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
+int mlx5_fs_core_alloc(struct mlx5_core_dev *dev);
+void mlx5_fs_core_free(struct mlx5_core_dev *dev);
+int mlx5_fs_core_init(struct mlx5_core_dev *dev);
+void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev);
 
 int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports);
 void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev);
@@ -8,7 +8,8 @@
 enum {
 	MLX5_FW_RESET_FLAGS_RESET_REQUESTED,
 	MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST,
-	MLX5_FW_RESET_FLAGS_PENDING_COMP
+	MLX5_FW_RESET_FLAGS_PENDING_COMP,
+	MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS
 };
 
 struct mlx5_fw_reset {
@@ -208,7 +209,10 @@ static void poll_sync_reset(struct timer_list *t)
 
 	if (fatal_error) {
 		mlx5_core_warn(dev, "Got Device Reset\n");
-		queue_work(fw_reset->wq, &fw_reset->reset_reload_work);
+		if (!test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags))
+			queue_work(fw_reset->wq, &fw_reset->reset_reload_work);
+		else
+			mlx5_core_err(dev, "Device is being removed, Drop new reset work\n");
 		return;
 	}
 
@@ -433,9 +437,12 @@ static int fw_reset_event_notifier(struct notifier_block *nb, unsigned long acti
 	struct mlx5_fw_reset *fw_reset = mlx5_nb_cof(nb, struct mlx5_fw_reset, nb);
 	struct mlx5_eqe *eqe = data;
 
+	if (test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags))
+		return NOTIFY_DONE;
+
 	switch (eqe->sub_type) {
 	case MLX5_GENERAL_SUBTYPE_FW_LIVE_PATCH_EVENT:
-			queue_work(fw_reset->wq, &fw_reset->fw_live_patch_work);
+		queue_work(fw_reset->wq, &fw_reset->fw_live_patch_work);
 		break;
 	case MLX5_GENERAL_SUBTYPE_PCI_SYNC_FOR_FW_UPDATE_EVENT:
 		mlx5_sync_reset_events_handle(fw_reset, eqe);
@@ -479,6 +486,18 @@ void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev)
 	mlx5_eq_notifier_unregister(dev, &dev->priv.fw_reset->nb);
 }
 
+void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
+{
+	struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+	set_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags);
+	cancel_work_sync(&fw_reset->fw_live_patch_work);
+	cancel_work_sync(&fw_reset->reset_request_work);
+	cancel_work_sync(&fw_reset->reset_reload_work);
+	cancel_work_sync(&fw_reset->reset_now_work);
+	cancel_work_sync(&fw_reset->reset_abort_work);
+}
+
 int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
 {
 	struct mlx5_fw_reset *fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);
@@ -16,6 +16,7 @@ int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev);
 int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev);
 void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev);
 void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev);
+void mlx5_drain_fw_reset(struct mlx5_core_dev *dev);
 int mlx5_fw_reset_init(struct mlx5_core_dev *dev);
 void mlx5_fw_reset_cleanup(struct mlx5_core_dev *dev);
@@ -938,6 +938,12 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
 		goto err_sf_table_cleanup;
 	}
 
+	err = mlx5_fs_core_alloc(dev);
+	if (err) {
+		mlx5_core_err(dev, "Failed to alloc flow steering\n");
+		goto err_fs;
+	}
+
 	dev->dm = mlx5_dm_create(dev);
 	if (IS_ERR(dev->dm))
 		mlx5_core_warn(dev, "Failed to init device memory%d\n", err);
@@ -948,6 +954,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
 
 	return 0;
 
+err_fs:
+	mlx5_sf_table_cleanup(dev);
 err_sf_table_cleanup:
 	mlx5_sf_hw_table_cleanup(dev);
 err_sf_hw_table_cleanup:
@@ -985,6 +993,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
 	mlx5_hv_vhca_destroy(dev->hv_vhca);
 	mlx5_fw_tracer_destroy(dev->tracer);
 	mlx5_dm_cleanup(dev);
+	mlx5_fs_core_free(dev);
 	mlx5_sf_table_cleanup(dev);
 	mlx5_sf_hw_table_cleanup(dev);
 	mlx5_vhca_event_cleanup(dev);
@@ -1191,7 +1200,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
 		goto err_tls_start;
 	}
 
-	err = mlx5_init_fs(dev);
+	err = mlx5_fs_core_init(dev);
 	if (err) {
 		mlx5_core_err(dev, "Failed to init flow steering\n");
 		goto err_fs;
@@ -1236,7 +1245,7 @@ err_ec:
 err_vhca:
 	mlx5_vhca_event_stop(dev);
 err_set_hca:
-	mlx5_cleanup_fs(dev);
+	mlx5_fs_core_cleanup(dev);
 err_fs:
 	mlx5_accel_tls_cleanup(dev);
 err_tls_start:
@@ -1265,7 +1274,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
 	mlx5_ec_cleanup(dev);
 	mlx5_sf_hw_table_destroy(dev);
 	mlx5_vhca_event_stop(dev);
-	mlx5_cleanup_fs(dev);
+	mlx5_fs_core_cleanup(dev);
 	mlx5_accel_ipsec_cleanup(dev);
 	mlx5_accel_tls_cleanup(dev);
 	mlx5_fpga_device_stop(dev);
@@ -1618,6 +1627,10 @@ static void remove_one(struct pci_dev *pdev)
 	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
 	struct devlink *devlink = priv_to_devlink(dev);
 
+	/* mlx5_drain_fw_reset() is using devlink APIs. Hence, we must drain
+	 * fw_reset before unregistering the devlink.
+	 */
+	mlx5_drain_fw_reset(dev);
 	devlink_unregister(devlink);
 	mlx5_sriov_disable(pdev);
 	mlx5_crdump_disable(dev);
@@ -530,6 +530,37 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn,
 	return 0;
 }
 
+static void dr_action_modify_ttl_adjust(struct mlx5dr_domain *dmn,
+					struct mlx5dr_ste_actions_attr *attr,
+					bool rx_rule,
+					bool *recalc_cs_required)
+{
+	*recalc_cs_required = false;
+
+	/* if device supports csum recalculation - no adjustment needed */
+	if (mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps))
+		return;
+
+	/* no adjustment needed on TX rules */
+	if (!rx_rule)
+		return;
+
+	if (!MLX5_CAP_ESW_FLOWTABLE(dmn->mdev, fdb_ipv4_ttl_modify)) {
+		/* Ignore the modify TTL action.
+		 * It is always kept as last HW action.
+		 */
+		attr->modify_actions--;
+		return;
+	}
+
+	if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB)
+		/* Due to a HW bug on some devices, modifying TTL on RX flows
+		 * will cause an incorrect checksum calculation. In such cases
+		 * we will use a FW table to recalculate the checksum.
+		 */
+		*recalc_cs_required = true;
+}
+
 static void dr_action_print_sequence(struct mlx5dr_domain *dmn,
 				     struct mlx5dr_action *actions[],
 				     int last_idx)
@@ -650,8 +681,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
 		case DR_ACTION_TYP_MODIFY_HDR:
 			attr.modify_index = action->rewrite->index;
 			attr.modify_actions = action->rewrite->num_of_actions;
-			recalc_cs_required = action->rewrite->modify_ttl &&
-					     !mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps);
+			if (action->rewrite->modify_ttl)
+				dr_action_modify_ttl_adjust(dmn, &attr, rx_rule,
+							    &recalc_cs_required);
 			break;
 		case DR_ACTION_TYP_L2_TO_TNL_L2:
 		case DR_ACTION_TYP_L2_TO_TNL_L3:
@@ -732,12 +764,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
 	*new_hw_ste_arr_sz = nic_matcher->num_of_builders;
 	last_ste = ste_arr + DR_STE_SIZE * (nic_matcher->num_of_builders - 1);
 
-	/* Due to a HW bug in some devices, modifying TTL on RX flows will
-	 * cause an incorrect checksum calculation. In this case we will
-	 * use a FW table to recalculate.
-	 */
-	if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB &&
-	    rx_rule && recalc_cs_required && dest_action) {
+	if (recalc_cs_required && dest_action) {
 		ret = dr_action_handle_cs_recalc(dmn, dest_action, &attr.final_icm_addr);
 		if (ret) {
 			mlx5dr_err(dmn,
@@ -842,7 +869,8 @@ struct mlx5dr_action *
 mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
 				   struct mlx5dr_action_dest *dests,
 				   u32 num_of_dests,
-				   bool ignore_flow_level)
+				   bool ignore_flow_level,
+				   u32 flow_source)
 {
 	struct mlx5dr_cmd_flow_destination_hw_info *hw_dests;
 	struct mlx5dr_action **ref_actions;
@@ -914,7 +942,8 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
 				      reformat_req,
 				      &action->dest_tbl->fw_tbl.id,
 				      &action->dest_tbl->fw_tbl.group_id,
-				      ignore_flow_level);
+				      ignore_flow_level,
+				      flow_source);
 	if (ret)
 		goto free_action;
 
@@ -1556,12 +1585,6 @@ dr_action_modify_check_is_ttl_modify(const void *sw_action)
 	return sw_field == MLX5_ACTION_IN_FIELD_OUT_IP_TTL;
 }
 
-static bool dr_action_modify_ttl_ignore(struct mlx5dr_domain *dmn)
-{
-	return !mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps) &&
-	       !MLX5_CAP_ESW_FLOWTABLE(dmn->mdev, fdb_ipv4_ttl_modify);
-}
-
 static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
 					    u32 max_hw_actions,
 					    u32 num_sw_actions,
@@ -1573,6 +1596,7 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
 	const struct mlx5dr_ste_action_modify_field *hw_dst_action_info;
 	const struct mlx5dr_ste_action_modify_field *hw_src_action_info;
 	struct mlx5dr_domain *dmn = action->rewrite->dmn;
+	__be64 *modify_ttl_sw_action = NULL;
 	int ret, i, hw_idx = 0;
 	__be64 *sw_action;
 	__be64 hw_action;
@@ -1585,8 +1609,14 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
 	action->rewrite->allow_rx = 1;
 	action->rewrite->allow_tx = 1;
 
-	for (i = 0; i < num_sw_actions; i++) {
-		sw_action = &sw_actions[i];
+	for (i = 0; i < num_sw_actions || modify_ttl_sw_action; i++) {
+		/* modify TTL is handled separately, as a last action */
+		if (i == num_sw_actions) {
+			sw_action = modify_ttl_sw_action;
+			modify_ttl_sw_action = NULL;
+		} else {
+			sw_action = &sw_actions[i];
+		}
 
 		ret = dr_action_modify_check_field_limitation(action,
 							      sw_action);
@@ -1595,10 +1625,9 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
 
 		if (!(*modify_ttl) &&
 		    dr_action_modify_check_is_ttl_modify(sw_action)) {
-			if (dr_action_modify_ttl_ignore(dmn))
-				continue;
-
+			modify_ttl_sw_action = sw_action;
 			*modify_ttl = true;
 			continue;
 		}
 
 		/* Convert SW action to HW action */
@@ -104,7 +104,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
 			    bool reformat_req,
 			    u32 *tbl_id,
 			    u32 *group_id,
-			    bool ignore_flow_level)
+			    bool ignore_flow_level,
+			    u32 flow_source)
 {
 	struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
 	struct mlx5dr_cmd_fte_info fte_info = {};
@@ -139,6 +140,7 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
 	fte_info.val = val;
 	fte_info.dest_arr = dest;
 	fte_info.ignore_flow_level = ignore_flow_level;
+	fte_info.flow_context.flow_source = flow_source;
 
 	ret = mlx5dr_cmd_set_fte(dmn->mdev, 0, 0, &ft_info, *group_id, &fte_info);
 	if (ret) {
@@ -420,7 +420,7 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
 	 * encapsulation. The reason for that is that we support
 	 * modify headers for outer headers only
 	 */
-	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
+	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] && attr->modify_actions) {
 		dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT);
 		dr_ste_v0_set_rewrite_actions(last_ste,
 					      attr->modify_actions,
@@ -513,7 +513,7 @@ dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
 		}
 	}
 
-	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
+	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] && attr->modify_actions) {
 		if (dr_ste_v0_get_entry_type(last_ste) == DR_STE_TYPE_MODIFY_PKT)
 			dr_ste_v0_arr_init_next(&last_ste,
 						added_stes,
@@ -1461,7 +1461,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
 			    bool reformat_req,
 			    u32 *tbl_id,
 			    u32 *group_id,
-			    bool ignore_flow_level);
+			    bool ignore_flow_level,
+			    u32 flow_source);
 void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id,
 			      u32 group_id);
 #endif /* _DR_TYPES_H_ */
@@ -520,6 +520,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
 	} else if (num_term_actions > 1) {
 		bool ignore_flow_level =
 			!!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
+		u32 flow_source = fte->flow_context.flow_source;
 
 		if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
 		    fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
@@ -529,7 +530,8 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
 		tmp_action = mlx5dr_action_create_mult_dest_tbl(domain,
 								term_actions,
 								num_term_actions,
-								ignore_flow_level);
+								ignore_flow_level,
+								flow_source);
 		if (!tmp_action) {
 			err = -EOPNOTSUPP;
 			goto free_actions;
@@ -99,7 +99,8 @@ struct mlx5dr_action *
 mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
 				   struct mlx5dr_action_dest *dests,
 				   u32 num_of_dests,
-				   bool ignore_flow_level);
+				   bool ignore_flow_level,
+				   u32 flow_source);
 
 struct mlx5dr_action *mlx5dr_action_create_drop(void);
 
@@ -103,6 +103,24 @@ static int lan966x_create_targets(struct platform_device *pdev,
 	return 0;
 }
 
+static bool lan966x_port_unique_address(struct net_device *dev)
+{
+	struct lan966x_port *port = netdev_priv(dev);
+	struct lan966x *lan966x = port->lan966x;
+	int p;
+
+	for (p = 0; p < lan966x->num_phys_ports; ++p) {
+		port = lan966x->ports[p];
+		if (!port || port->dev == dev)
+			continue;
+
+		if (ether_addr_equal(dev->dev_addr, port->dev->dev_addr))
+			return false;
+	}
+
+	return true;
+}
+
 static int lan966x_port_set_mac_address(struct net_device *dev, void *p)
 {
 	struct lan966x_port *port = netdev_priv(dev);
@@ -110,16 +128,26 @@ static int lan966x_port_set_mac_address(struct net_device *dev, void *p)
 	const struct sockaddr *addr = p;
 	int ret;
 
+	if (ether_addr_equal(addr->sa_data, dev->dev_addr))
+		return 0;
+
 	/* Learn the new net device MAC address in the mac table. */
 	ret = lan966x_mac_cpu_learn(lan966x, addr->sa_data, HOST_PVID);
 	if (ret)
 		return ret;
 
+	/* If there is another port with the same address as the dev, then don't
+	 * delete it from the MAC table
+	 */
+	if (!lan966x_port_unique_address(dev))
+		goto out;
+
 	/* Then forget the previous one. */
 	ret = lan966x_mac_cpu_forget(lan966x, dev->dev_addr, HOST_PVID);
 	if (ret)
 		return ret;
 
+out:
 	eth_hw_addr_set(dev, addr->sa_data);
 	return ret;
 }
@@ -3614,7 +3614,8 @@ static void ql_reset_work(struct work_struct *work)
 		qdev->mem_map_registers;
 	unsigned long hw_flags;
 
-	if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) {
+	if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
+	    test_bit(QL_RESET_START, &qdev->flags)) {
 		clear_bit(QL_LINK_MASTER, &qdev->flags);
 
 		/*
@@ -1367,9 +1367,10 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
 	struct gsi_event *event_done;
 	struct gsi_event *event;
 	struct gsi_trans *trans;
+	u32 trans_count = 0;
 	u32 byte_count = 0;
-	u32 old_index;
 	u32 event_avail;
+	u32 old_index;
 
 	trans_info = &channel->trans_info;
 
@@ -1390,6 +1391,7 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
 	do {
 		trans->len = __le16_to_cpu(event->len);
 		byte_count += trans->len;
+		trans_count++;
 
 		/* Move on to the next event and transaction */
 		if (--event_avail)
@@ -1401,7 +1403,7 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
 
 	/* We record RX bytes when they are received */
 	channel->byte_count += byte_count;
-	channel->trans_count++;
+	channel->trans_count += trans_count;
 }
 
 /* Initialize a ring, including allocating DMA memory for its entries */
@@ -1150,13 +1150,12 @@ static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
 		return;
 
 	skb = __dev_alloc_skb(len, GFP_ATOMIC);
-	if (!skb)
-		return;
-
-	/* Copy the data into the socket buffer and receive it */
-	skb_put(skb, len);
-	memcpy(skb->data, data, len);
-	skb->truesize += extra;
+	if (skb) {
+		/* Copy the data into the socket buffer and receive it */
+		skb_put(skb, len);
+		memcpy(skb->data, data, len);
+		skb->truesize += extra;
+	}
 
 	ipa_modem_skb_rx(endpoint->netdev, skb);
 }
@@ -125,7 +125,7 @@ static void ipa_qmi_indication(struct ipa_qmi *ipa_qmi)
  */
 static void ipa_qmi_ready(struct ipa_qmi *ipa_qmi)
 {
-	struct ipa *ipa = container_of(ipa_qmi, struct ipa, qmi);
+	struct ipa *ipa;
 	int ret;
 
 	/* We aren't ready until the modem and microcontroller are */
@@ -988,6 +988,7 @@ static int pppoe_fill_forward_path(struct net_device_path_ctx *ctx,
 	path->encap.proto = htons(ETH_P_PPP_SES);
 	path->encap.id = be16_to_cpu(po->num);
 	memcpy(path->encap.h_dest, po->pppoe_pa.remote, ETH_ALEN);
+	memcpy(ctx->daddr, po->pppoe_pa.remote, ETH_ALEN);
 	path->dev = ctx->dev;
 	ctx->dev = dev;
 
@@ -589,6 +589,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
 				if (dma_mapping_error(&adapter->pdev->dev,
 						      rbi->dma_addr)) {
 					dev_kfree_skb_any(rbi->skb);
+					rbi->skb = NULL;
 					rq->stats.rx_buf_alloc_failure++;
 					break;
 				}
@@ -613,6 +614,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
 				if (dma_mapping_error(&adapter->pdev->dev,
 						      rbi->dma_addr)) {
 					put_page(rbi->page);
+					rbi->page = NULL;
 					rq->stats.rx_buf_alloc_failure++;
 					break;
 				}
@@ -1666,6 +1668,10 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
 	u32 i, ring_idx;
 	struct Vmxnet3_RxDesc *rxd;
 
+	/* ring has already been cleaned up */
+	if (!rq->rx_ring[0].base)
+		return;
+
 	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
 		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
 #ifdef __BIG_ENDIAN_BITFIELD
@@ -2787,13 +2787,14 @@ void pn53x_common_clean(struct pn533 *priv)
 {
 	struct pn533_cmd *cmd, *n;
 
+	/* delete the timer before cleanup the worker */
+	del_timer_sync(&priv->listen_timer);
+
 	flush_delayed_work(&priv->poll_work);
 	destroy_workqueue(priv->wq);
 
 	skb_queue_purge(&priv->resp_q);
 
-	del_timer(&priv->listen_timer);
-
 	list_for_each_entry_safe(cmd, n, &priv->cmd_queue, queue) {
 		list_del(&cmd->queue);
 		kfree(cmd);
@@ -300,7 +300,7 @@ struct ptp_ocp {
 	struct platform_device *spi_flash;
 	struct clk_hw *i2c_clk;
 	struct timer_list watchdog;
-	const struct ocp_attr_group *attr_tbl;
+	const struct attribute_group **attr_group;
 	const struct ptp_ocp_eeprom_map *eeprom_map;
 	struct dentry *debug_root;
 	time64_t gnss_lost;
@@ -841,7 +841,7 @@ __ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u32 adj_val)
 }
 
 static void
-ptp_ocp_adjtime_coarse(struct ptp_ocp *bp, u64 delta_ns)
+ptp_ocp_adjtime_coarse(struct ptp_ocp *bp, s64 delta_ns)
 {
 	struct timespec64 ts;
 	unsigned long flags;
@@ -850,7 +850,8 @@ ptp_ocp_adjtime_coarse(struct ptp_ocp *bp, u64 delta_ns)
 	spin_lock_irqsave(&bp->lock, flags);
 	err = __ptp_ocp_gettime_locked(bp, &ts, NULL);
 	if (likely(!err)) {
-		timespec64_add_ns(&ts, delta_ns);
+		set_normalized_timespec64(&ts, ts.tv_sec,
+					  ts.tv_nsec + delta_ns);
 		__ptp_ocp_settime_locked(bp, &ts);
 	}
 	spin_unlock_irqrestore(&bp->lock, flags);
@@ -1835,6 +1836,42 @@ ptp_ocp_signal_init(struct ptp_ocp *bp)
 					 bp->signal_out[i]->mem);
 }
 
+static void
+ptp_ocp_attr_group_del(struct ptp_ocp *bp)
+{
+	sysfs_remove_groups(&bp->dev.kobj, bp->attr_group);
+	kfree(bp->attr_group);
+}
+
+static int
+ptp_ocp_attr_group_add(struct ptp_ocp *bp,
+		       const struct ocp_attr_group *attr_tbl)
+{
+	int count, i;
+	int err;
+
+	count = 0;
+	for (i = 0; attr_tbl[i].cap; i++)
+		if (attr_tbl[i].cap & bp->fw_cap)
+			count++;
+
+	bp->attr_group = kcalloc(count + 1, sizeof(struct attribute_group *),
+				 GFP_KERNEL);
+	if (!bp->attr_group)
+		return -ENOMEM;
+
+	count = 0;
+	for (i = 0; attr_tbl[i].cap; i++)
+		if (attr_tbl[i].cap & bp->fw_cap)
+			bp->attr_group[count++] = attr_tbl[i].group;
+
+	err = sysfs_create_groups(&bp->dev.kobj, bp->attr_group);
+	if (err)
+		bp->attr_group[0] = NULL;
+
+	return err;
+}
+
 static void
 ptp_ocp_sma_init(struct ptp_ocp *bp)
 {
@@ -1904,7 +1941,6 @@ ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
 	bp->flash_start = 1024 * 4096;
 	bp->eeprom_map = fb_eeprom_map;
 	bp->fw_version = ioread32(&bp->image->version);
-	bp->attr_tbl = fb_timecard_groups;
 	bp->fw_cap = OCP_CAP_BASIC;
 
 	ver = bp->fw_version & 0xffff;
@@ -1918,6 +1954,10 @@ ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
 	ptp_ocp_sma_init(bp);
 	ptp_ocp_signal_init(bp);
 
+	err = ptp_ocp_attr_group_add(bp, fb_timecard_groups);
+	if (err)
+		return err;
+
 	err = ptp_ocp_fb_set_pins(bp);
 	if (err)
 		return err;
@@ -3388,7 +3428,6 @@ ptp_ocp_complete(struct ptp_ocp *bp)
 {
 	struct pps_device *pps;
 	char buf[32];
-	int i, err;
 
 	if (bp->gnss_port != -1) {
 		sprintf(buf, "ttyS%d", bp->gnss_port);
@@ -3413,14 +3452,6 @@ ptp_ocp_complete(struct ptp_ocp *bp)
 	if (pps)
 		ptp_ocp_symlink(bp, pps->dev, "pps");
 
-	for (i = 0; bp->attr_tbl[i].cap; i++) {
-		if (!(bp->attr_tbl[i].cap & bp->fw_cap))
-			continue;
-		err = sysfs_create_group(&bp->dev.kobj, bp->attr_tbl[i].group);
-		if (err)
-			return err;
-	}
-
 	ptp_ocp_debugfs_add_device(bp);
 
 	return 0;
@@ -3492,15 +3523,11 @@ static void
 ptp_ocp_detach_sysfs(struct ptp_ocp *bp)
 {
 	struct device *dev = &bp->dev;
-	int i;
 
 	sysfs_remove_link(&dev->kobj, "ttyGNSS");
 	sysfs_remove_link(&dev->kobj, "ttyMAC");
 	sysfs_remove_link(&dev->kobj, "ptp");
 	sysfs_remove_link(&dev->kobj, "pps");
-	if (bp->attr_tbl)
-		for (i = 0; bp->attr_tbl[i].cap; i++)
-			sysfs_remove_group(&dev->kobj, bp->attr_tbl[i].group);
 }
 
 static void
@@ -3510,6 +3537,7 @@ ptp_ocp_detach(struct ptp_ocp *bp)
 
 	ptp_ocp_debugfs_remove_device(bp);
 	ptp_ocp_detach_sysfs(bp);
+	ptp_ocp_attr_group_del(bp);
 	if (timer_pending(&bp->watchdog))
 		del_timer_sync(&bp->watchdog);
 	if (bp->ts0)
@@ -900,7 +900,7 @@ struct net_device_path_stack {
 
 struct net_device_path_ctx {
 	const struct net_device *dev;
-	const u8		*daddr;
+	u8			daddr[ETH_ALEN];
 
 	int			num_vlans;
 	struct {
@@ -71,7 +71,6 @@ struct inet_timewait_sock {
 				tw_tos		: 8;
 	u32			tw_txhash;
 	u32			tw_priority;
-	u32			tw_bslot; /* bind bucket slot */
 	struct timer_list	tw_timer;
 	struct inet_bind_bucket	*tw_tb;
 };
@@ -110,6 +109,8 @@ static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo
 
 void inet_twsk_deschedule_put(struct inet_timewait_sock *tw);
 
+void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family);
+
 static inline
 struct net *twsk_net(const struct inet_timewait_sock *twsk)
 {
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -56,6 +56,7 @@ struct inet_skb_parm {
 #define IPSKB_DOREDIRECT	BIT(5)
 #define IPSKB_FRAG_PMTU		BIT(6)
 #define IPSKB_L3SLAVE		BIT(7)
+#define IPSKB_NOPOLICY		BIT(8)
 
 	u16			frag_max_size;
 };
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1093,6 +1093,18 @@ static inline bool __xfrm_check_nopolicy(struct net *net, struct sk_buff *skb,
 	return false;
 }
 
+static inline bool __xfrm_check_dev_nopolicy(struct sk_buff *skb,
+					     int dir, unsigned short family)
+{
+	if (dir != XFRM_POLICY_OUT && family == AF_INET) {
+		/* same dst may be used for traffic originating from
+		 * devices with different policy settings.
+		 */
+		return IPCB(skb)->flags & IPSKB_NOPOLICY;
+	}
+	return skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY);
+}
+
 static inline int __xfrm_policy_check2(struct sock *sk, int dir,
 				       struct sk_buff *skb,
 				       unsigned int family, int reverse)
@@ -1104,7 +1116,7 @@ static inline int __xfrm_policy_check2(struct sock *sk, int dir,
 		return __xfrm_policy_check(sk, ndir, skb, family);
 
 	return __xfrm_check_nopolicy(net, skb, dir) ||
-	       (skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) ||
+	       __xfrm_check_dev_nopolicy(skb, dir, family) ||
	       __xfrm_policy_check(sk, ndir, skb, family);
 }
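The new IPSKB_NOPOLICY bit exists because an input route can be cached and then shared by devices whose disable_policy settings differ, so the "skip policy check" decision has to travel with the packet rather than with the dst. The shape of the idea as a standalone sketch (plain C, names illustrative, not kernel code):

	#include <stdbool.h>
	#include <stdint.h>

	#define PKT_NOPOLICY (1u << 8)	/* stands in for IPSKB_NOPOLICY */

	struct pkt {
		uint16_t flags;		/* per-packet scratch, like skb->cb */
	};

	/* input path: record the ingress device's setting on the packet */
	static void mark_ingress(struct pkt *p, bool dev_nopolicy)
	{
		if (dev_nopolicy)
			p->flags |= PKT_NOPOLICY;
	}

	/* policy check: trust the per-packet mark, not the shared route */
	static bool may_skip_policy(const struct pkt *p, bool dst_nopolicy)
	{
		return (p->flags & PKT_NOPOLICY) || dst_nopolicy;
	}
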
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -39,6 +39,13 @@ static int br_pass_frame_up(struct sk_buff *skb)
 	dev_sw_netstats_rx_add(brdev, skb->len);
 
 	vg = br_vlan_group_rcu(br);
+
+	/* Reset the offload_fwd_mark because there could be a stacked
+	 * bridge above, and it should not think this bridge is doing
+	 * that bridge's work forwarding out its ports.
+	 */
+	br_switchdev_frame_unmark(skb);
+
 	/* Bridge is just like any other port.  Make sure the
 	 * packet is allowed except in promisc mode when someone
 	 * may be running packet capture.
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -681,11 +681,11 @@ int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
 	const struct net_device *last_dev;
 	struct net_device_path_ctx ctx = {
 		.dev	= dev,
-		.daddr	= daddr,
 	};
 	struct net_device_path *path;
 	int ret = 0;
 
+	memcpy(ctx.daddr, daddr, sizeof(ctx.daddr));
 	stack->num_paths = 0;
 	while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) {
 		last_dev = ctx.dev;
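The netdevice.h and dev.c hunks are two halves of one fix: the path-walk context used to borrow the caller's daddr pointer, which ndo_fill_forward_path handlers could keep dereferencing after the underlying buffer changed. Embedding a fixed-size copy removes the lifetime dependency. The shape of the fix, as a standalone sketch (not the kernel code itself):

	#include <string.h>

	#define ETH_ALEN 6

	struct path_ctx {
		/* embedded copy: valid for the context's whole lifetime */
		unsigned char daddr[ETH_ALEN];
	};

	static void path_ctx_init(struct path_ctx *ctx,
				  const unsigned char *daddr)
	{
		/* Copy instead of aliasing, so a later change to the
		 * caller's buffer cannot be observed through ctx. */
		memcpy(ctx->daddr, daddr, ETH_ALEN);
	}
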
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -1030,9 +1030,15 @@ static void __net_exit dccp_v4_exit_net(struct net *net)
 	inet_ctl_sock_destroy(pn->v4_ctl_sk);
 }
 
+static void __net_exit dccp_v4_exit_batch(struct list_head *net_exit_list)
+{
+	inet_twsk_purge(&dccp_hashinfo, AF_INET);
+}
+
 static struct pernet_operations dccp_v4_ops = {
 	.init	= dccp_v4_init_net,
 	.exit	= dccp_v4_exit_net,
+	.exit_batch = dccp_v4_exit_batch,
 	.id	= &dccp_v4_pernet_id,
 	.size   = sizeof(struct dccp_v4_pernet),
 };
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -1115,9 +1115,15 @@ static void __net_exit dccp_v6_exit_net(struct net *net)
 	inet_ctl_sock_destroy(pn->v6_ctl_sk);
 }
 
+static void __net_exit dccp_v6_exit_batch(struct list_head *net_exit_list)
+{
+	inet_twsk_purge(&dccp_hashinfo, AF_INET6);
+}
+
 static struct pernet_operations dccp_v6_ops = {
 	.init   = dccp_v6_init_net,
 	.exit   = dccp_v6_exit_net,
+	.exit_batch = dccp_v6_exit_batch,
 	.id	= &dccp_v6_pernet_id,
 	.size   = sizeof(struct dccp_v6_pernet),
 };
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -52,7 +52,8 @@ static void inet_twsk_kill(struct inet_timewait_sock *tw)
 	spin_unlock(lock);
 
 	/* Disassociate with bind bucket. */
-	bhead = &hashinfo->bhash[tw->tw_bslot];
+	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
+			hashinfo->bhash_size)];
 
 	spin_lock(&bhead->lock);
 	inet_twsk_bind_unhash(tw, hashinfo);
@@ -111,12 +112,8 @@ void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 	   Note, that any socket with inet->num != 0 MUST be bound in
 	   binding cache, even if it is closed.
 	 */
-	/* Cache inet_bhashfn(), because 'struct net' might be no longer
-	 * available later in inet_twsk_kill().
-	 */
-	tw->tw_bslot = inet_bhashfn(twsk_net(tw), inet->inet_num,
-				    hashinfo->bhash_size);
-	bhead = &hashinfo->bhash[tw->tw_bslot];
+	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
+			hashinfo->bhash_size)];
 	spin_lock(&bhead->lock);
 	tw->tw_tb = icsk->icsk_bind_hash;
 	WARN_ON(!icsk->icsk_bind_hash);
@@ -257,3 +254,50 @@ void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
 	}
 }
 EXPORT_SYMBOL_GPL(__inet_twsk_schedule);
+
+void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family)
+{
+	struct inet_timewait_sock *tw;
+	struct sock *sk;
+	struct hlist_nulls_node *node;
+	unsigned int slot;
+
+	for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
+		struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
+restart_rcu:
+		cond_resched();
+		rcu_read_lock();
+restart:
+		sk_nulls_for_each_rcu(sk, node, &head->chain) {
+			if (sk->sk_state != TCP_TIME_WAIT)
+				continue;
+			tw = inet_twsk(sk);
+			if ((tw->tw_family != family) ||
+				refcount_read(&twsk_net(tw)->ns.count))
+				continue;
+
+			if (unlikely(!refcount_inc_not_zero(&tw->tw_refcnt)))
+				continue;
+
+			if (unlikely((tw->tw_family != family) ||
+				     refcount_read(&twsk_net(tw)->ns.count))) {
+				inet_twsk_put(tw);
+				goto restart;
+			}
+
+			rcu_read_unlock();
+			local_bh_disable();
+			inet_twsk_deschedule_put(tw);
+			local_bh_enable();
+			goto restart_rcu;
+		}
+		/* If the nulls value we got at the end of this lookup is
+		 * not the expected one, we must restart lookup.
+		 * We probably met an item that was moved to another chain.
+		 */
+		if (get_nulls_value(node) != slot)
+			goto restart;
+		rcu_read_unlock();
+	}
+}
+EXPORT_SYMBOL_GPL(inet_twsk_purge);
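inet_twsk_purge() follows the classic RCU lookup discipline: match optimistically, take a reference only if the object is still live, then re-validate the match because the socket may have been recycled in between. The reference-grab step, sketched standalone with C11 atomics (illustrative, not the kernel's refcount_t implementation):

	#include <stdatomic.h>
	#include <stdbool.h>

	struct obj {
		atomic_int refcnt;	/* 0 means the object is being freed */
	};

	/* Equivalent in spirit to refcount_inc_not_zero(): succeed only if
	 * the count was non-zero for the whole increment. */
	static bool get_ref_if_live(struct obj *o)
	{
		int c = atomic_load(&o->refcnt);

		while (c != 0)
			/* on failure, c is reloaded with the current value */
			if (atomic_compare_exchange_weak(&o->refcnt, &c, c + 1))
				return true;
		return false;	/* already on its way to destruction */
	}
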
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1726,6 +1726,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	struct in_device *in_dev = __in_dev_get_rcu(dev);
 	unsigned int flags = RTCF_MULTICAST;
 	struct rtable *rth;
+	bool no_policy;
 	u32 itag = 0;
 	int err;
 
@@ -1736,8 +1737,12 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	if (our)
 		flags |= RTCF_LOCAL;
 
+	no_policy = IN_DEV_ORCONF(in_dev, NOPOLICY);
+	if (no_policy)
+		IPCB(skb)->flags |= IPSKB_NOPOLICY;
+
 	rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
-			   IN_DEV_ORCONF(in_dev, NOPOLICY), false);
+			   no_policy, false);
 	if (!rth)
 		return -ENOBUFS;
 
@@ -1796,7 +1801,7 @@ static int __mkroute_input(struct sk_buff *skb,
 	struct rtable *rth;
 	int err;
 	struct in_device *out_dev;
-	bool do_cache;
+	bool do_cache, no_policy;
 	u32 itag = 0;
 
 	/* get a working reference to the output device */
@@ -1841,6 +1846,10 @@ static int __mkroute_input(struct sk_buff *skb,
 		}
 	}
 
+	no_policy = IN_DEV_ORCONF(in_dev, NOPOLICY);
+	if (no_policy)
+		IPCB(skb)->flags |= IPSKB_NOPOLICY;
+
 	fnhe = find_exception(nhc, daddr);
 	if (do_cache) {
 		if (fnhe)
@@ -1853,8 +1862,7 @@ static int __mkroute_input(struct sk_buff *skb,
 		}
 	}
 
-	rth = rt_dst_alloc(out_dev->dev, 0, res->type,
-			   IN_DEV_ORCONF(in_dev, NOPOLICY),
+	rth = rt_dst_alloc(out_dev->dev, 0, res->type, no_policy,
 			   IN_DEV_ORCONF(out_dev, NOXFRM));
 	if (!rth) {
 		err = -ENOBUFS;
@@ -2229,6 +2237,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	struct rtable *rth;
 	struct flowi4	fl4;
 	bool do_cache = true;
+	bool no_policy;
 
 	/* IP on this device is disabled. */
 
@@ -2347,6 +2356,10 @@ brd_input:
 	RT_CACHE_STAT_INC(in_brd);
 
 local_input:
+	no_policy = IN_DEV_ORCONF(in_dev, NOPOLICY);
+	if (no_policy)
+		IPCB(skb)->flags |= IPSKB_NOPOLICY;
+
 	do_cache &= res->fi && !itag;
 	if (do_cache) {
 		struct fib_nh_common *nhc = FIB_RES_NHC(*res);
@@ -2361,7 +2374,7 @@ local_input:
 
 	rth = rt_dst_alloc(ip_rt_get_dev(net, res),
 			   flags | RTCF_LOCAL, res->type,
-			   IN_DEV_ORCONF(in_dev, NOPOLICY), false);
+			   no_policy, false);
 	if (!rth)
 		goto e_nobufs;
 
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -3173,6 +3173,8 @@ static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
 {
 	struct net *net;
 
+	inet_twsk_purge(&tcp_hashinfo, AF_INET);
+
 	list_for_each_entry(net, net_exit_list, exit_list)
 		tcp_fastopen_ctx_destroy(net);
 }
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -2207,9 +2207,15 @@ static void __net_exit tcpv6_net_exit(struct net *net)
 	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
 }
 
+static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
+{
+	inet_twsk_purge(&tcp_hashinfo, AF_INET6);
+}
+
 static struct pernet_operations tcpv6_net_ops = {
 	.init	    = tcpv6_net_init,
 	.exit	    = tcpv6_net_exit,
+	.exit_batch = tcpv6_net_exit_batch,
 };
 
 int __init tcpv6_init(void)
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -2826,8 +2826,10 @@ static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr)
 	void *ext_hdrs[SADB_EXT_MAX];
 	int err;
 
-	pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
-			BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
+	err = pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
+			      BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
+	if (err)
+		return err;
 
 	memset(ext_hdrs, 0, sizeof(ext_hdrs));
 	err = parse_exthdrs(skb, hdr, ext_hdrs);
@@ -2898,7 +2900,7 @@ static int count_ah_combs(const struct xfrm_tmpl *t)
 			break;
 		if (!aalg->pfkey_supported)
 			continue;
-		if (aalg_tmpl_set(t, aalg))
+		if (aalg_tmpl_set(t, aalg) && aalg->available)
 			sz += sizeof(struct sadb_comb);
 	}
 	return sz + sizeof(struct sadb_prop);
@@ -2916,7 +2918,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
 		if (!ealg->pfkey_supported)
 			continue;
 
-		if (!(ealg_tmpl_set(t, ealg)))
+		if (!(ealg_tmpl_set(t, ealg) && ealg->available))
 			continue;
 
 		for (k = 1; ; k++) {
@@ -2927,7 +2929,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
 			if (!aalg->pfkey_supported)
 				continue;
 
-			if (aalg_tmpl_set(t, aalg))
+			if (aalg_tmpl_set(t, aalg) && aalg->available)
 				sz += sizeof(struct sadb_comb);
 		}
 	}
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -107,7 +107,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
 			ptr += 2;
 		}
 		if (opsize == TCPOLEN_MPTCP_MPC_ACK_DATA_CSUM) {
-			mp_opt->csum = (__force __sum16)get_unaligned_be16(ptr);
+			mp_opt->csum = get_unaligned((__force __sum16 *)ptr);
 			mp_opt->suboptions |= OPTION_MPTCP_CSUMREQD;
 			ptr += 2;
 		}
@@ -221,7 +221,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
 
 		if (opsize == expected_opsize + TCPOLEN_MPTCP_DSS_CHECKSUM) {
 			mp_opt->suboptions |= OPTION_MPTCP_CSUMREQD;
-			mp_opt->csum = (__force __sum16)get_unaligned_be16(ptr);
+			mp_opt->csum = get_unaligned((__force __sum16 *)ptr);
 			ptr += 2;
 		}
 
@@ -1240,7 +1240,7 @@ static void mptcp_set_rwin(const struct tcp_sock *tp)
 		WRITE_ONCE(msk->rcv_wnd_sent, ack_seq);
 }
 
-u16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum)
+__sum16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum)
 {
 	struct csum_pseudo_header header;
 	__wsum csum;
@@ -1256,15 +1256,25 @@ u16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum)
 	header.csum = 0;
 
 	csum = csum_partial(&header, sizeof(header), sum);
-	return (__force u16)csum_fold(csum);
+	return csum_fold(csum);
 }
 
-static u16 mptcp_make_csum(const struct mptcp_ext *mpext)
+static __sum16 mptcp_make_csum(const struct mptcp_ext *mpext)
 {
 	return __mptcp_make_csum(mpext->data_seq, mpext->subflow_seq, mpext->data_len,
 				 ~csum_unfold(mpext->csum));
 }
 
+static void put_len_csum(u16 len, __sum16 csum, void *data)
+{
+	__sum16 *sumptr = data + 2;
+	__be16 *ptr = data;
+
+	put_unaligned_be16(len, ptr);
+
+	put_unaligned(csum, sumptr);
+}
+
 void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
 			 struct mptcp_out_options *opts)
 {
@@ -1340,8 +1350,9 @@ void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
 			put_unaligned_be32(mpext->subflow_seq, ptr);
 			ptr += 1;
 			if (opts->csum_reqd) {
-				put_unaligned_be32(mpext->data_len << 16 |
-						   mptcp_make_csum(mpext), ptr);
+				put_len_csum(mpext->data_len,
+					     mptcp_make_csum(mpext),
+					     ptr);
 			} else {
 				put_unaligned_be32(mpext->data_len << 16 |
 						   TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
@@ -1392,11 +1403,12 @@ void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
 			goto mp_capable_done;
 
 		if (opts->csum_reqd) {
-			put_unaligned_be32(opts->data_len << 16 |
-					   __mptcp_make_csum(opts->data_seq,
-							     opts->subflow_seq,
-							     opts->data_len,
-							     ~csum_unfold(opts->csum)), ptr);
+			put_len_csum(opts->data_len,
+				     __mptcp_make_csum(opts->data_seq,
+						       opts->subflow_seq,
+						       opts->data_len,
+						       ~csum_unfold(opts->csum)),
+				     ptr);
 		} else {
 			put_unaligned_be32(opts->data_len << 16 |
 					   TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
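The core of the checksum fix: __sum16 values are already in network byte order, so folding one into a host-order integer and emitting it through a big-endian 32-bit store swaps its bytes on little-endian machines. put_len_csum() stores the value verbatim instead. A standalone demonstration (plain C, not kernel code):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* a checksum as it should appear on the wire: 0x12 0x34 */
		unsigned char wire[2] = { 0x12, 0x34 };
		unsigned char out[2];
		uint16_t csum;

		memcpy(&csum, wire, 2);		/* like reading a __sum16 */

		/* broken: treat the value as host-order and store it
		 * big-endian; a little-endian host emits 0x34 0x12 */
		out[0] = csum >> 8;
		out[1] = csum & 0xff;
		printf("be16 store: %02x %02x\n", out[0], out[1]);

		/* fixed: store the bytes verbatim, as put_unaligned() does */
		memcpy(out, &csum, 2);
		printf("verbatim:   %02x %02x\n", out[0], out[1]);
		return 0;
	}
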
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -178,14 +178,13 @@ void mptcp_pm_subflow_check_next(struct mptcp_sock *msk, const struct sock *ssk,
 	struct mptcp_pm_data *pm = &msk->pm;
 	bool update_subflows;
 
-	update_subflows = (ssk->sk_state == TCP_CLOSE) &&
-			  (subflow->request_join || subflow->mp_join);
+	update_subflows = subflow->request_join || subflow->mp_join;
 	if (!READ_ONCE(pm->work_pending) && !update_subflows)
 		return;
 
 	spin_lock_bh(&pm->lock);
 	if (update_subflows)
-		pm->subflows--;
+		__mptcp_pm_close_subflow(msk);
 
 	/* Even if this subflow is not really established, tell the PM to try
 	 * to pick the next ones, if possible.
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -443,7 +443,8 @@ struct mptcp_subflow_context {
 		can_ack : 1,        /* only after processing the remote a key */
 		disposable : 1,	    /* ctx can be free at ulp release time */
 		stale : 1,	    /* unable to snd/rcv data, do not use for xmit */
-		local_id_valid : 1; /* local_id is correctly initialized */
+		local_id_valid : 1, /* local_id is correctly initialized */
+		valid_csum_seen : 1;        /* at least one csum validated */
 	enum mptcp_data_avail data_avail;
 	u32	remote_nonce;
 	u64	thmac;
@@ -723,7 +724,7 @@ void mptcp_token_destroy(struct mptcp_sock *msk);
 void mptcp_crypto_key_sha(u64 key, u32 *token, u64 *idsn);
 
 void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac);
-u16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum);
+__sum16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum);
 
 void __init mptcp_pm_init(void);
 void mptcp_pm_data_init(struct mptcp_sock *msk);
@@ -833,6 +834,20 @@ unsigned int mptcp_pm_get_add_addr_accept_max(const struct mptcp_sock *msk);
 unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk);
 unsigned int mptcp_pm_get_local_addr_max(const struct mptcp_sock *msk);
 
+/* called under PM lock */
+static inline void __mptcp_pm_close_subflow(struct mptcp_sock *msk)
+{
+	if (--msk->pm.subflows < mptcp_pm_get_subflows_max(msk))
+		WRITE_ONCE(msk->pm.accept_subflow, true);
+}
+
+static inline void mptcp_pm_close_subflow(struct mptcp_sock *msk)
+{
+	spin_lock_bh(&msk->pm.lock);
+	__mptcp_pm_close_subflow(msk);
+	spin_unlock_bh(&msk->pm.lock);
+}
+
 void mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk);
 void mptcp_sockopt_sync_locked(struct mptcp_sock *msk, struct sock *ssk);
 
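The new helper pair follows the usual kernel convention: the double-underscore variant assumes the caller already holds the relevant lock, and the plain-named wrapper acquires it. Sketched standalone with pthreads (illustrative only):

	#include <pthread.h>

	static pthread_mutex_t pm_lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned int subflows;

	/* __variant: caller must already hold pm_lock */
	static void __close_subflow(void)
	{
		subflows--;
	}

	/* plain variant: self-locking wrapper for unlocked contexts */
	static void close_subflow(void)
	{
		pthread_mutex_lock(&pm_lock);
		__close_subflow();
		pthread_mutex_unlock(&pm_lock);
	}

In the diff above, mptcp_pm_subflow_check_next() can call the __ form because it already runs under pm->lock, while the __mptcp_subflow_connect() error path uses the self-locking wrapper.
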
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -888,7 +888,7 @@ static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *skb,
 {
 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
 	u32 offset, seq, delta;
-	u16 csum;
+	__sum16 csum;
 	int len;
 
 	if (!csum_reqd)
@@ -955,11 +955,14 @@ static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *skb,
 				 subflow->map_data_csum);
 	if (unlikely(csum)) {
 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DATACSUMERR);
-		subflow->send_mp_fail = 1;
-		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPFAILTX);
+		if (subflow->mp_join || subflow->valid_csum_seen) {
+			subflow->send_mp_fail = 1;
+			MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPFAILTX);
+		}
 		return subflow->mp_join ? MAPPING_INVALID : MAPPING_DUMMY;
 	}
 
+	subflow->valid_csum_seen = 1;
 	return MAPPING_OK;
 }
 
@@ -1141,6 +1144,18 @@ static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
 	}
 }
 
+static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
+{
+	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+
+	if (subflow->mp_join)
+		return false;
+	else if (READ_ONCE(msk->csum_enabled))
+		return !subflow->valid_csum_seen;
+	else
+		return !subflow->fully_established;
+}
+
 static bool subflow_check_data_avail(struct sock *ssk)
 {
 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
@@ -1218,7 +1233,7 @@ fallback:
 		return true;
 	}
 
-	if (subflow->mp_join || subflow->fully_established) {
+	if (!subflow_can_fallback(subflow)) {
 		/* fatal protocol error, close the socket.
 		 * subflow_error_report() will introduce the appropriate barriers
 		 */
@@ -1422,20 +1437,20 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
 	struct sockaddr_storage addr;
 	int remote_id = remote->id;
 	int local_id = loc->id;
+	int err = -ENOTCONN;
 	struct socket *sf;
 	struct sock *ssk;
 	u32 remote_token;
 	int addrlen;
 	int ifindex;
 	u8 flags;
-	int err;
 
 	if (!mptcp_is_fully_established(sk))
-		return -ENOTCONN;
+		goto err_out;
 
 	err = mptcp_subflow_create_socket(sk, &sf);
 	if (err)
-		return err;
+		goto err_out;
 
 	ssk = sf->sk;
 	subflow = mptcp_subflow_ctx(ssk);
@@ -1492,6 +1507,12 @@ failed_unlink:
 failed:
 	subflow->disposable = 1;
 	sock_release(sf);
+
+err_out:
+	/* we account subflows before the creation, and these failures will not
+	 * be caught by sk_state_change()
+	 */
+	mptcp_pm_close_subflow(msk);
 	return err;
 }
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -179,12 +179,11 @@ EXPORT_SYMBOL_GPL(flow_offload_route_init);
 
 static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
 {
-	tcp->state = TCP_CONNTRACK_ESTABLISHED;
 	tcp->seen[0].td_maxwin = 0;
 	tcp->seen[1].td_maxwin = 0;
 }
 
-static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
+static void flow_offload_fixup_ct(struct nf_conn *ct)
 {
 	struct net *net = nf_ct_net(ct);
 	int l4num = nf_ct_protonum(ct);
@@ -193,7 +192,9 @@ static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
 	if (l4num == IPPROTO_TCP) {
 		struct nf_tcp_net *tn = nf_tcp_pernet(net);
 
-		timeout = tn->timeouts[TCP_CONNTRACK_ESTABLISHED];
+		flow_offload_fixup_tcp(&ct->proto.tcp);
+
+		timeout = tn->timeouts[ct->proto.tcp.state];
 		timeout -= tn->offload_timeout;
 	} else if (l4num == IPPROTO_UDP) {
 		struct nf_udp_net *tn = nf_udp_pernet(net);
@@ -211,18 +212,6 @@ static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
 		WRITE_ONCE(ct->timeout, nfct_time_stamp + timeout);
 }
 
-static void flow_offload_fixup_ct_state(struct nf_conn *ct)
-{
-	if (nf_ct_protonum(ct) == IPPROTO_TCP)
-		flow_offload_fixup_tcp(&ct->proto.tcp);
-}
-
-static void flow_offload_fixup_ct(struct nf_conn *ct)
-{
-	flow_offload_fixup_ct_state(ct);
-	flow_offload_fixup_ct_timeout(ct);
-}
-
 static void flow_offload_route_release(struct flow_offload *flow)
 {
 	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
@@ -335,8 +324,10 @@ void flow_offload_refresh(struct nf_flowtable *flow_table,
 	u32 timeout;
 
 	timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
-	if (READ_ONCE(flow->timeout) != timeout)
+	if (timeout - READ_ONCE(flow->timeout) > HZ)
 		WRITE_ONCE(flow->timeout, timeout);
+	else
+		return;
 
 	if (likely(!nf_flowtable_hw_offload(flow_table)))
 		return;
@@ -359,22 +350,14 @@ static void flow_offload_del(struct nf_flowtable *flow_table,
 	rhashtable_remove_fast(&flow_table->rhashtable,
 			       &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
 			       nf_flow_offload_rhash_params);
-
-	clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);
-
-	if (nf_flow_has_expired(flow))
-		flow_offload_fixup_ct(flow->ct);
-	else
-		flow_offload_fixup_ct_timeout(flow->ct);
-
 	flow_offload_free(flow);
 }
 
 void flow_offload_teardown(struct flow_offload *flow)
 {
+	clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);
 	set_bit(NF_FLOW_TEARDOWN, &flow->flags);
-
-	flow_offload_fixup_ct_state(flow->ct);
+	flow_offload_fixup_ct(flow->ct);
 }
 EXPORT_SYMBOL_GPL(flow_offload_teardown);
 
@@ -438,33 +421,12 @@ nf_flow_table_iterate(struct nf_flowtable *flow_table,
 	return err;
 }
 
-static bool flow_offload_stale_dst(struct flow_offload_tuple *tuple)
-{
-	struct dst_entry *dst;
-
-	if (tuple->xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
-	    tuple->xmit_type == FLOW_OFFLOAD_XMIT_XFRM) {
-		dst = tuple->dst_cache;
-		if (!dst_check(dst, tuple->dst_cookie))
-			return true;
-	}
-
-	return false;
-}
-
-static bool nf_flow_has_stale_dst(struct flow_offload *flow)
-{
-	return flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple) ||
-	       flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple);
-}
-
 static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
 				    struct flow_offload *flow, void *data)
 {
 	if (nf_flow_has_expired(flow) ||
-	    nf_ct_is_dying(flow->ct) ||
-	    nf_flow_has_stale_dst(flow))
-		set_bit(NF_FLOW_TEARDOWN, &flow->flags);
+	    nf_ct_is_dying(flow->ct))
+		flow_offload_teardown(flow);
 
 	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
 		if (test_bit(NF_FLOW_HW, &flow->flags)) {
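Two details worth noting in the flow_offload_refresh() change: the test is an unsigned subtraction, so it stays correct when the jiffies counter wraps, and requiring a delta larger than HZ means the shared flow->timeout word is rewritten at most about once a second instead of on every packet. The same guard, standalone (illustrative, not kernel code):

	#include <stdbool.h>
	#include <stdint.h>

	#define HZ 250	/* illustrative tick rate */

	/* Rewrite the shared timestamp only when it would move forward by
	 * more than one second. Unsigned subtraction keeps the comparison
	 * valid across counter wraparound. */
	static bool refresh_needed(uint32_t fresh_timeout, uint32_t cur_timeout)
	{
		return fresh_timeout - cur_timeout > HZ;
	}
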
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -248,6 +248,15 @@ static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
 	return true;
 }
 
+static inline bool nf_flow_dst_check(struct flow_offload_tuple *tuple)
+{
+	if (tuple->xmit_type != FLOW_OFFLOAD_XMIT_NEIGH &&
+	    tuple->xmit_type != FLOW_OFFLOAD_XMIT_XFRM)
+		return true;
+
+	return dst_check(tuple->dst_cache, tuple->dst_cookie);
+}
+
 static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
 				      const struct nf_hook_state *state,
 				      struct dst_entry *dst)
@@ -367,6 +376,11 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
 	if (nf_flow_state_check(flow, iph->protocol, skb, thoff))
 		return NF_ACCEPT;
 
+	if (!nf_flow_dst_check(&tuplehash->tuple)) {
+		flow_offload_teardown(flow);
+		return NF_ACCEPT;
+	}
+
 	if (skb_try_make_writable(skb, thoff + hdrsize))
 		return NF_DROP;
 
@@ -624,6 +638,11 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
 	if (nf_flow_state_check(flow, ip6h->nexthdr, skb, thoff))
 		return NF_ACCEPT;
 
+	if (!nf_flow_dst_check(&tuplehash->tuple)) {
+		flow_offload_teardown(flow);
+		return NF_ACCEPT;
+	}
+
 	if (skb_try_make_writable(skb, thoff + hdrsize))
 		return NF_DROP;
 
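Moving the dst_check() from the garbage collector into the packet path means a stale cached route is caught by the very next packet rather than up to a GC interval later. The cookie idea in isolation: capture a generation number when the route is cached and treat the cache as valid only while the number still matches. Illustrative sketch (not the kernel's dst implementation):

	#include <stdbool.h>
	#include <stdint.h>

	/* bumped whenever the routing tables change (illustrative) */
	static uint32_t route_genid;

	struct cached_route {
		uint32_t cookie;	/* route_genid captured at cache time */
	};

	/* dst_check()-style validation at packet-processing time: if the
	 * generation moved on, the caller tears the flow down and lets
	 * the packet take the normal slow path. */
	static bool cached_route_valid(const struct cached_route *rt)
	{
		return rt->cookie == route_genid;
	}
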
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -8342,16 +8342,7 @@ EXPORT_SYMBOL_GPL(nf_tables_trans_destroy_flush_work);
 static bool nft_expr_reduce(struct nft_regs_track *track,
 			    const struct nft_expr *expr)
 {
-	if (!expr->ops->reduce) {
-		pr_warn_once("missing reduce for expression %s ",
-			     expr->ops->type->name);
-		return false;
-	}
-
-	if (nft_reduce_is_readonly(expr))
-		return false;
-
-	return expr->ops->reduce(track, expr);
+	return false;
 }
 
 static int nf_tables_commit_chain_prepare(struct net *net, struct nft_chain *chain)
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -36,6 +36,15 @@ static void nft_default_forward_path(struct nf_flow_route *route,
 	route->tuple[dir].xmit_type	= nft_xmit_type(dst_cache);
 }
 
+static bool nft_is_valid_ether_device(const struct net_device *dev)
+{
+	if (!dev || (dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
+	    dev->addr_len != ETH_ALEN || !is_valid_ether_addr(dev->dev_addr))
+		return false;
+
+	return true;
+}
+
 static int nft_dev_fill_forward_path(const struct nf_flow_route *route,
 				     const struct dst_entry *dst_cache,
 				     const struct nf_conn *ct,
@@ -47,6 +56,9 @@ static int nft_dev_fill_forward_path(const struct nf_flow_route *route,
 	struct neighbour *n;
 	u8 nud_state;
 
+	if (!nft_is_valid_ether_device(dev))
+		goto out;
+
 	n = dst_neigh_lookup(dst_cache, daddr);
 	if (!n)
 		return -1;
@@ -60,6 +72,7 @@ static int nft_dev_fill_forward_path(const struct nf_flow_route *route,
 	if (!(nud_state & NUD_VALID))
 		return -1;
 
+out:
 	return dev_fill_forward_path(dev, ha, stack);
 }
 
@@ -78,15 +91,6 @@ struct nft_forward_info {
 	enum flow_offload_xmit_type xmit_type;
 };
 
-static bool nft_is_valid_ether_device(const struct net_device *dev)
-{
-	if (!dev || (dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
-	    dev->addr_len != ETH_ALEN || !is_valid_ether_addr(dev->dev_addr))
-		return false;
-
-	return true;
-}
-
 static void nft_dev_path_info(const struct net_device_path_stack *stack,
 			      struct nft_forward_info *info,
 			      unsigned char *ha, struct nf_flowtable *flowtable)
@@ -119,7 +123,8 @@ static void nft_dev_path_info(const struct net_device_path_stack *stack,
 			info->indev = NULL;
 			break;
 		}
-		info->outdev = path->dev;
+		if (!info->outdev)
+			info->outdev = path->dev;
 		info->encap[info->num_encaps].id = path->encap.id;
 		info->encap[info->num_encaps].proto = path->encap.proto;
 		info->num_encaps++;
@@ -293,7 +298,8 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
 	case IPPROTO_TCP:
 		tcph = skb_header_pointer(pkt->skb, nft_thoff(pkt),
 					  sizeof(_tcph), &_tcph);
-		if (unlikely(!tcph || tcph->fin || tcph->rst))
+		if (unlikely(!tcph || tcph->fin || tcph->rst ||
+			     !nf_conntrack_tcp_established(ct)))
 			goto out;
 		break;
 	case IPPROTO_UDP:
--- a/net/nfc/nci/data.c
+++ b/net/nfc/nci/data.c
@@ -118,7 +118,7 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev,
 
 		skb_frag = nci_skb_alloc(ndev,
 					 (NCI_DATA_HDR_SIZE + frag_len),
-					 GFP_KERNEL);
+					 GFP_ATOMIC);
 		if (skb_frag == NULL) {
 			rc = -ENOMEM;
 			goto free_exit;
--- a/net/nfc/nci/hci.c
+++ b/net/nfc/nci/hci.c
@@ -153,7 +153,7 @@ static int nci_hci_send_data(struct nci_dev *ndev, u8 pipe,
 
 	i = 0;
 	skb = nci_skb_alloc(ndev, conn_info->max_pkt_payload_len +
-			    NCI_DATA_HDR_SIZE, GFP_KERNEL);
+			    NCI_DATA_HDR_SIZE, GFP_ATOMIC);
 	if (!skb)
 		return -ENOMEM;
 
@@ -184,7 +184,7 @@ static int nci_hci_send_data(struct nci_dev *ndev, u8 pipe,
 		if (i < data_len) {
 			skb = nci_skb_alloc(ndev,
 					    conn_info->max_pkt_payload_len +
-					    NCI_DATA_HDR_SIZE, GFP_KERNEL);
+					    NCI_DATA_HDR_SIZE, GFP_ATOMIC);
 			if (!skb)
 				return -ENOMEM;
 
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -232,6 +232,10 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
 	for (i = 0; i < p->tcfp_nkeys; ++i) {
 		u32 cur = p->tcfp_keys[i].off;
 
+		/* sanitize the shift value for any later use */
+		p->tcfp_keys[i].shift = min_t(size_t, BITS_PER_TYPE(int) - 1,
+					      p->tcfp_keys[i].shift);
+
 		/* The AT option can read a single byte, we can bound the actual
 		 * value with uchar max.
 		 */
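The pedit clamp exists because shifting a 32-bit integer by 32 or more is undefined behaviour in C, and here the shift count arrives from userspace via netlink. Bounding it at BITS_PER_TYPE(int) - 1, i.e. 31, makes any later use safe. The same idea standalone:

	#include <stdint.h>

	/* Clamp an untrusted shift count before using it: v << 32 on a
	 * 32-bit type is undefined behaviour, not zero. */
	static uint32_t safe_shl(uint32_t v, uint32_t shift)
	{
		if (shift > 31)
			shift = 31;
		return v << shift;
	}
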
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -3744,7 +3744,7 @@ static int stale_bundle(struct dst_entry *dst)
 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
 {
 	while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
-		dst->dev = dev_net(dev)->loopback_dev;
+		dst->dev = blackhole_netdev;
 		dev_hold(dst->dev);
 		dev_put(dev);
 	}
--- a/tools/testing/selftests/net/forwarding/Makefile
+++ b/tools/testing/selftests/net/forwarding/Makefile
@@ -86,7 +86,7 @@ TEST_PROGS = bridge_igmp.sh \
 	vxlan_bridge_1d_port_8472.sh \
 	vxlan_bridge_1d.sh \
 	vxlan_bridge_1q_ipv6.sh \
-	vxlan_bridge_1q_port_8472_ipv6.sh
+	vxlan_bridge_1q_port_8472_ipv6.sh \
 	vxlan_bridge_1q_port_8472.sh \
 	vxlan_bridge_1q.sh \
 	vxlan_symmetric_ipv6.sh \
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -1444,6 +1444,33 @@ chk_prio_nr()
 	[ "${dump_stats}" = 1 ] && dump_stats
 }
 
+chk_subflow_nr()
+{
+	local need_title="$1"
+	local msg="$2"
+	local subflow_nr=$3
+	local cnt1
+	local cnt2
+
+	if [ -n "${need_title}" ]; then
+		printf "%03u %-36s %s" "${TEST_COUNT}" "${TEST_NAME}" "${msg}"
+	else
+		printf "%-${nr_blank}s %s" " " "${msg}"
+	fi
+
+	cnt1=$(ss -N $ns1 -tOni | grep -c token)
+	cnt2=$(ss -N $ns2 -tOni | grep -c token)
+	if [ "$cnt1" != "$subflow_nr" -o "$cnt2" != "$subflow_nr" ]; then
+		echo "[fail] got $cnt1:$cnt2 subflows expected $subflow_nr"
+		fail_test
+		dump_stats=1
+	else
+		echo "[ ok ]"
+	fi
+
+	[ "${dump_stats}" = 1 ] && ( ss -N $ns1 -tOni ; ss -N $ns1 -tOni | grep token; ip -n $ns1 mptcp endpoint )
+}
+
 chk_link_usage()
 {
 	local ns=$1
@@ -2556,7 +2583,7 @@ fastclose_tests()
 	fi
 }
 
-implicit_tests()
+endpoint_tests()
 {
 	# userspace pm type prevents add_addr
 	if reset "implicit EP"; then
@@ -2578,6 +2605,23 @@ implicit_tests()
 			$ns2 10.0.2.2 id 1 flags signal
 		wait
 	fi
+
+	if reset "delete and re-add"; then
+		pm_nl_set_limits $ns1 1 1
+		pm_nl_set_limits $ns2 1 1
+		pm_nl_add_endpoint $ns2 10.0.2.2 id 2 dev ns2eth2 flags subflow
+		run_tests $ns1 $ns2 10.0.1.1 4 0 0 slow &
+
+		wait_mpj $ns2
+		pm_nl_del_endpoint $ns2 2 10.0.2.2
+		sleep 0.5
+		chk_subflow_nr needtitle "after delete" 1
+
+		pm_nl_add_endpoint $ns2 10.0.2.2 dev ns2eth2 flags subflow
+		wait_mpj $ns2
+		chk_subflow_nr "" "after re-add" 2
+		wait
+	fi
 }
 
 # [$1: error message]
@@ -2624,7 +2668,7 @@ all_tests_sorted=(
 	d@deny_join_id0_tests
 	m@fullmesh_tests
 	z@fastclose_tests
-	I@implicit_tests
+	I@endpoint_tests
 )
 
 all_tests_args=""