Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Handle multicast packets properly in fast-RX path of mac80211, from
    Johannes Berg.

 2) Because of a logic bug, the user can't actually force SW
    checksumming on r8152 devices. This makes diagnosis of hw
    checksumming bugs really annoying. Fix from Hayes Wang.

 3) VXLAN route lookup does not take the source and destination ports
    into account, which means IPsec policies cannot be matched properly.
    Fix from Martynas Pumputis. (A flowi4 port-key sketch follows this
    list.)

 4) Do proper RCU locking in netvsc callbacks, from Stephen Hemminger.
    (An RCU read-side sketch follows this list.)

 5) Fix SKB leaks in mlxsw driver, from Arkadi Sharshevsky.

 6) If lwtunnel_fill_encap() fails, we do not abort the netlink message
    construction properly in fib_dump_info(), from David Ahern. (A
    netlink fill sketch follows this list.)

 7) Do not use the kernel stack for DMA buffers in the atusb driver,
    from Stefan Schmidt. (A heap-buffer sketch follows this list.)

 8) Openvswitch conntrack actions need to maintain correct checksum
    state, fix from Lance Richardson. (An rcsum sketch follows this
    list.)

 9) ax25_disconnect() needs to handle ax25->sk being NULL; it already
    checks for this, but not in all of the necessary spots. Fix from
    Basil Gunn.

10) Action GET operations in the packet scheduler can erroneously bump
    the reference count of the entry, making it unreleasable. Fix from
    Jamal Hadi Salim, whose commit message gives a great set of example
    command lines that trigger this. (A toy refcount example follows
    this list.)
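
The sketches below are illustrative rewrites of the patterns named
above, not the patches themselves; helper names, struct names and
request constants in them are hypothetical unless noted.

For (3), a minimal sketch of keying an IPv4 route lookup on the UDP
ports so that xfrm/IPsec policies which match on ports can apply. The
struct flowi4 members are real; the helper function is made up:

#include <net/route.h>

static struct rtable *tunnel_route_lookup(struct net *net,
					  __be32 saddr, __be32 daddr,
					  __be16 sport, __be16 dport)
{
	struct flowi4 fl4 = {};

	fl4.flowi4_proto = IPPROTO_UDP;
	fl4.daddr = daddr;
	fl4.saddr = saddr;
	fl4.fl4_sport = sport;	/* without the ports in the flow key,  */
	fl4.fl4_dport = dport;	/* port-based IPsec policies never match */

	return ip_route_output_key(net, &fl4);
}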
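
For (4), a sketch of the RCU read-side pattern the netvsc fix applies;
the context struct and callback are hypothetical:

#include <linux/rcupdate.h>
#include <linux/netdevice.h>

struct example_ctx {
	struct net_device __rcu *vf_netdev;
};

static void example_recv(struct example_ctx *ctx, struct sk_buff *skb)
{
	struct net_device *vf;

	rcu_read_lock();			/* pin the RCU pointer */
	vf = rcu_dereference(ctx->vf_netdev);	/* valid only inside the
						 * read-side section */
	if (vf && (vf->flags & IFF_UP))
		skb->dev = vf;		/* deliver via the VF interface */
	netif_rx(skb);
	rcu_read_unlock();		/* every exit path, including the
					 * error returns, must drop it */
}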
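
For (6), a sketch of the netlink fill convention that fib_dump_info()
now follows: any attribute that fails to fit must jump to
nla_put_failure so the half-built message is cancelled rather than
sent. Message and attribute types here are placeholders:

#include <net/netlink.h>

static int example_fill(struct sk_buff *skb, u32 portid, u32 seq, u32 val)
{
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq, 0 /* msg type */, 0, 0);
	if (!nlh)
		return -EMSGSIZE;

	if (nla_put_u32(skb, 1 /* attr type */, val))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);	/* rewind skb to before this message */
	return -EMSGSIZE;
}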
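
For (7), a sketch of the rule behind the atusb fix: USB transfer
buffers get DMA-mapped, and DMA to or from the kernel stack is not
permitted (and breaks outright with VMAP_STACK), so the buffer must be
heap-allocated. The vendor request value is a placeholder:

#include <linux/slab.h>
#include <linux/usb.h>

static int example_read_reg(struct usb_device *udev, u8 reg)
{
	u8 *buf;	/* not "u8 buf" on the stack */
	int ret;

	buf = kmalloc(1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      0x01 /* request */,
			      USB_DIR_IN | USB_TYPE_VENDOR,
			      0, reg, buf, 1, 1000);
	if (ret >= 0)
		ret = buf[0];

	kfree(buf);
	return ret;
}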
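
For (8), a sketch of the CHECKSUM_COMPLETE bookkeeping the openvswitch
fix adopts: pulling the headers off for L3 processing must fold the
pulled bytes out of skb->csum, and pushing them back must fold them in
again, or the stored checksum no longer matches the data:

#include <linux/skbuff.h>

static int example_l3_process(struct sk_buff *skb,
			      int (*process)(struct sk_buff *skb))
{
	int nh_ofs = skb_network_offset(skb);
	int err;

	skb_pull_rcsum(skb, nh_ofs);	/* pull + csum_sub() */
	err = process(skb);		/* conntrack works at L3 */
	skb_push(skb, nh_ofs);
	skb_postpush_rcsum(skb, skb->data, nh_ofs); /* csum_add() back */
	return err;
}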
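
For (10), a toy userspace illustration (not the tc code) of why an
unmatched reference taken by a read-only GET makes an object
unreleasable:

#include <stdio.h>
#include <stdlib.h>

struct obj { int refcnt; };

static struct obj *obj_create(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	o->refcnt = 1;		/* creator's reference */
	return o;
}

static void obj_put(struct obj *o)
{
	if (--o->refcnt == 0) {
		puts("released");
		free(o);
	}
}

int main(void)
{
	struct obj *o = obj_create();

	o->refcnt++;	/* buggy GET: bumps the count, nothing puts it */
	obj_put(o);	/* delete: 2 -> 1, "released" never prints */
	return 0;
}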

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (46 commits)
  net sched actions: fix refcnt when GETing of action after bind
  net/mlx4_core: Eliminate warning messages for SRQ_LIMIT under SRIOV
  net/mlx4_core: Fix when to save some qp context flags for dynamic VST to VGT transitions
  net/mlx4_core: Fix racy CQ (Completion Queue) free
  net: stmmac: don't use netdev_[dbg, info, ..] before net_device is registered
  net/mlx5e: Fix a -Wmaybe-uninitialized warning
  ax25: Fix segfault after sock connection timeout
  bpf: rework prog_digest into prog_tag
  tipc: allocate user memory with GFP_KERNEL flag
  net: phy: dp83867: allow RGMII_TXID/RGMII_RXID interface types
  ip6_tunnel: Account for tunnel header in tunnel MTU
  mld: do not remove mld souce list info when set link down
  be2net: fix MAC addr setting on privileged BE3 VFs
  be2net: don't delete MAC on close on unprivileged BE3 VFs
  be2net: fix status check in be_cmd_pmac_add()
  cpmac: remove hopeless #warning
  ravb: do not use zero-length alignment DMA descriptor
  mlx4: do not call napi_schedule() without care
  openvswitch: maintain correct checksum state in conntrack actions
  tcp: fix tcp_fastopen unaligned access complaints on sparc
  ...
Merged by Linus Torvalds on 2017-01-17 09:33:10 -08:00 (commit 4b19a9e20b).
60 changed files with 384 additions and 210 deletions

View File

@ -3,9 +3,11 @@
Required properties:
- reg - The ID number for the phy, usually a small integer
- ti,rx-internal-delay - RGMII Receive Clock Delay - see dt-bindings/net/ti-dp83867.h
for applicable values
for applicable values. Required only if interface type is
PHY_INTERFACE_MODE_RGMII_ID or PHY_INTERFACE_MODE_RGMII_RXID
- ti,tx-internal-delay - RGMII Transmit Clock Delay - see dt-bindings/net/ti-dp83867.h
for applicable values
for applicable values. Required only if interface type is
PHY_INTERFACE_MODE_RGMII_ID or PHY_INTERFACE_MODE_RGMII_TXID
- ti,fifo-depth - Transmitt FIFO depth- see dt-bindings/net/ti-dp83867.h
for applicable values

View File

@ -710,11 +710,8 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
unsigned int pkts_compl = 0, bytes_compl = 0;
struct bcm_sysport_cb *cb;
struct netdev_queue *txq;
u32 hw_ind;
txq = netdev_get_tx_queue(ndev, ring->index);
/* Compute how many descriptors have been processed since last call */
hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
@ -745,9 +742,6 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
ring->c_index = c_index;
if (netif_tx_queue_stopped(txq) && pkts_compl)
netif_tx_wake_queue(txq);
netif_dbg(priv, tx_done, ndev,
"ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
ring->index, ring->c_index, pkts_compl, bytes_compl);
@ -759,16 +753,33 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
struct bcm_sysport_tx_ring *ring)
{
struct netdev_queue *txq;
unsigned int released;
unsigned long flags;
txq = netdev_get_tx_queue(priv->netdev, ring->index);
spin_lock_irqsave(&ring->lock, flags);
released = __bcm_sysport_tx_reclaim(priv, ring);
if (released)
netif_tx_wake_queue(txq);
spin_unlock_irqrestore(&ring->lock, flags);
return released;
}
/* Locked version of the per-ring TX reclaim, but does not wake the queue */
static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
struct bcm_sysport_tx_ring *ring)
{
unsigned long flags;
spin_lock_irqsave(&ring->lock, flags);
__bcm_sysport_tx_reclaim(priv, ring);
spin_unlock_irqrestore(&ring->lock, flags);
}
static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
struct bcm_sysport_tx_ring *ring =
@ -1252,7 +1263,7 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
napi_disable(&ring->napi);
netif_napi_del(&ring->napi);
bcm_sysport_tx_reclaim(priv, ring);
bcm_sysport_tx_clean(priv, ring);
kfree(ring->cbs);
ring->cbs = NULL;

View File

@ -47,8 +47,9 @@ struct lmac {
struct bgx {
u8 bgx_id;
struct lmac lmac[MAX_LMAC_PER_BGX];
int lmac_count;
u8 lmac_count;
u8 max_lmac;
u8 acpi_lmac_idx;
void __iomem *reg_base;
struct pci_dev *pdev;
bool is_dlm;
@ -1143,13 +1144,13 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle,
if (acpi_bus_get_device(handle, &adev))
goto out;
acpi_get_mac_address(dev, adev, bgx->lmac[bgx->lmac_count].mac);
acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);
SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, dev);
SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);
bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count;
bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx;
bgx->acpi_lmac_idx++; /* move to next LMAC */
out:
bgx->lmac_count++;
return AE_OK;
}

View File

@ -1118,7 +1118,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
err:
mutex_unlock(&adapter->mcc_lock);
if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
status = -EPERM;
return status;

View File

@ -318,6 +318,13 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
return 0;
/* BE3 VFs without FILTMGMT privilege are not allowed to set its MAC
* address
*/
if (BEx_chip(adapter) && be_virtfn(adapter) &&
!check_privilege(adapter, BE_PRIV_FILTMGMT))
return -EPERM;
/* if device is not running, copy MAC to netdev->dev_addr */
if (!netif_running(netdev))
goto done;
@ -3609,7 +3616,11 @@ static void be_rx_qs_destroy(struct be_adapter *adapter)
static void be_disable_if_filters(struct be_adapter *adapter)
{
be_dev_mac_del(adapter, adapter->pmac_id[0]);
/* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
check_privilege(adapter, BE_PRIV_FILTMGMT))
be_dev_mac_del(adapter, adapter->pmac_id[0]);
be_clear_uc_list(adapter);
be_clear_mc_list(adapter);
@ -3762,8 +3773,9 @@ static int be_enable_if_filters(struct be_adapter *adapter)
if (status)
return status;
/* For BE3 VFs, the PF programs the initial MAC address */
if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
/* Don't add MAC on BE3 VFs without FILTMGMT privilege */
if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
check_privilege(adapter, BE_PRIV_FILTMGMT)) {
status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
if (status)
return status;

View File

@ -101,13 +101,19 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
{
struct mlx4_cq *cq;
rcu_read_lock();
cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
cqn & (dev->caps.num_cqs - 1));
rcu_read_unlock();
if (!cq) {
mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
return;
}
/* Acessing the CQ outside of rcu_read_lock is safe, because
* the CQ is freed only after interrupt handling is completed.
*/
++cq->arm_sn;
cq->comp(cq);
@ -118,23 +124,19 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
struct mlx4_cq *cq;
spin_lock(&cq_table->lock);
rcu_read_lock();
cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
if (cq)
atomic_inc(&cq->refcount);
spin_unlock(&cq_table->lock);
rcu_read_unlock();
if (!cq) {
mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn);
return;
}
/* Acessing the CQ outside of rcu_read_lock is safe, because
* the CQ is freed only after interrupt handling is completed.
*/
cq->event(cq, event_type);
if (atomic_dec_and_test(&cq->refcount))
complete(&cq->free);
}
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@ -301,9 +303,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
if (err)
return err;
spin_lock_irq(&cq_table->lock);
spin_lock(&cq_table->lock);
err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
spin_unlock_irq(&cq_table->lock);
spin_unlock(&cq_table->lock);
if (err)
goto err_icm;
@ -349,9 +351,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
return 0;
err_radix:
spin_lock_irq(&cq_table->lock);
spin_lock(&cq_table->lock);
radix_tree_delete(&cq_table->tree, cq->cqn);
spin_unlock_irq(&cq_table->lock);
spin_unlock(&cq_table->lock);
err_icm:
mlx4_cq_free_icm(dev, cq->cqn);
@ -370,15 +372,15 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
if (err)
mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
spin_lock(&cq_table->lock);
radix_tree_delete(&cq_table->tree, cq->cqn);
spin_unlock(&cq_table->lock);
synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
spin_lock_irq(&cq_table->lock);
radix_tree_delete(&cq_table->tree, cq->cqn);
spin_unlock_irq(&cq_table->lock);
if (atomic_dec_and_test(&cq->refcount))
complete(&cq->free);
wait_for_completion(&cq->free);

View File

@ -1748,8 +1748,11 @@ int mlx4_en_start_port(struct net_device *dev)
/* Process all completions if exist to prevent
* the queues freezing if they are full
*/
for (i = 0; i < priv->rx_ring_num; i++)
for (i = 0; i < priv->rx_ring_num; i++) {
local_bh_disable();
napi_schedule(&priv->rx_cq[i]->napi);
local_bh_enable();
}
netif_tx_start_all_queues(dev);
netif_device_attach(dev);

View File

@ -554,8 +554,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
break;
case MLX4_EVENT_TYPE_SRQ_LIMIT:
mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
__func__);
mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n",
__func__, be32_to_cpu(eqe->event.srq.srqn),
eq->eqn);
case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
if (mlx4_is_master(dev)) {
/* forward only to slave owning the SRQ */
@ -570,15 +571,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
eq->eqn, eq->cons_index, ret);
break;
}
mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
__func__, slave,
be32_to_cpu(eqe->event.srq.srqn),
eqe->type, eqe->subtype);
if (eqe->type ==
MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
__func__, slave,
be32_to_cpu(eqe->event.srq.srqn),
eqe->type, eqe->subtype);
if (!ret && slave != dev->caps.function) {
mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
__func__, eqe->type,
eqe->subtype, slave);
if (eqe->type ==
MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
__func__, eqe->type,
eqe->subtype, slave);
mlx4_slave_event(dev, slave, eqe);
break;
}

View File

@ -2980,6 +2980,9 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
put_res(dev, slave, srqn, RES_SRQ);
qp->srq = srq;
}
/* Save param3 for dynamic changes from VST back to VGT */
qp->param3 = qpc->param3;
put_res(dev, slave, rcqn, RES_CQ);
put_res(dev, slave, mtt_base, RES_MTT);
res_end_move(dev, slave, RES_QP, qpn);
@ -3772,7 +3775,6 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
int qpn = vhcr->in_modifier & 0x7fffff;
struct res_qp *qp;
u8 orig_sched_queue;
__be32 orig_param3 = qpc->param3;
u8 orig_vlan_control = qpc->pri_path.vlan_control;
u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
u8 orig_pri_path_fl = qpc->pri_path.fl;
@ -3814,7 +3816,6 @@ out:
*/
if (!err) {
qp->sched_queue = orig_sched_queue;
qp->param3 = orig_param3;
qp->vlan_control = orig_vlan_control;
qp->fvl_rx = orig_fvl_rx;
qp->pri_path_fl = orig_pri_path_fl;

View File

@ -668,9 +668,12 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
int ttl;
#if IS_ENABLED(CONFIG_INET)
int ret;
rt = ip_route_output_key(dev_net(mirred_dev), fl4);
if (IS_ERR(rt))
return PTR_ERR(rt);
ret = PTR_ERR_OR_ZERO(rt);
if (ret)
return ret;
#else
return -EOPNOTSUPP;
#endif
@ -741,8 +744,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
struct flowi4 fl4 = {};
char *encap_header;
int encap_size;
__be32 saddr = 0;
int ttl = 0;
__be32 saddr;
int ttl;
int err;
encap_header = kzalloc(max_encap_size, GFP_KERNEL);

View File

@ -209,21 +209,21 @@ MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1);
/* pci_eqe_cmd_token
* Command completion event - token
*/
MLXSW_ITEM32(pci, eqe, cmd_token, 0x08, 16, 16);
MLXSW_ITEM32(pci, eqe, cmd_token, 0x00, 16, 16);
/* pci_eqe_cmd_status
* Command completion event - status
*/
MLXSW_ITEM32(pci, eqe, cmd_status, 0x08, 0, 8);
MLXSW_ITEM32(pci, eqe, cmd_status, 0x00, 0, 8);
/* pci_eqe_cmd_out_param_h
* Command completion event - output parameter - higher part
*/
MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x0C, 0, 32);
MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x04, 0, 32);
/* pci_eqe_cmd_out_param_l
* Command completion event - output parameter - lower part
*/
MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x10, 0, 32);
MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x08, 0, 32);
#endif

View File

@ -684,6 +684,7 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
dev_kfree_skb_any(skb_orig);
return NETDEV_TX_OK;
}
dev_consume_skb_any(skb_orig);
}
if (eth_skb_pad(skb)) {

View File

@ -345,6 +345,7 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
dev_kfree_skb_any(skb_orig);
return NETDEV_TX_OK;
}
dev_consume_skb_any(skb_orig);
}
mlxsw_sx_txhdr_construct(skb, &tx_info);
/* TX header is consumed by HW on the way so we shouldn't count its

View File

@ -201,6 +201,13 @@ int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt)
else
adpt->phydev = mdiobus_get_phy(mii_bus, phy_addr);
/* of_phy_find_device() claims a reference to the phydev,
* so we do that here manually as well. When the driver
* later unloads, it can unilaterally drop the reference
* without worrying about ACPI vs DT.
*/
if (adpt->phydev)
get_device(&adpt->phydev->mdio.dev);
} else {
struct device_node *phy_np;

View File

@ -719,8 +719,7 @@ static int emac_probe(struct platform_device *pdev)
err_undo_napi:
netif_napi_del(&adpt->rx_q.napi);
err_undo_mdiobus:
if (!has_acpi_companion(&pdev->dev))
put_device(&adpt->phydev->mdio.dev);
put_device(&adpt->phydev->mdio.dev);
mdiobus_unregister(adpt->mii_bus);
err_undo_clocks:
emac_clks_teardown(adpt);
@ -740,8 +739,7 @@ static int emac_remove(struct platform_device *pdev)
emac_clks_teardown(adpt);
if (!has_acpi_companion(&pdev->dev))
put_device(&adpt->phydev->mdio.dev);
put_device(&adpt->phydev->mdio.dev);
mdiobus_unregister(adpt->mii_bus);
free_netdev(netdev);

View File

@ -926,14 +926,10 @@ static int ravb_poll(struct napi_struct *napi, int budget)
/* Receive error message handling */
priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
if (priv->rx_over_errors != ndev->stats.rx_over_errors) {
if (priv->rx_over_errors != ndev->stats.rx_over_errors)
ndev->stats.rx_over_errors = priv->rx_over_errors;
netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n");
}
if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) {
if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n");
}
out:
return budget - quota;
}
@ -1508,6 +1504,19 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
entry / NUM_TX_DESC * DPTR_ALIGN;
len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
/* Zero length DMA descriptors are problematic as they seem to
* terminate DMA transfers. Avoid them by simply using a length of
* DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN.
*
* As skb is guaranteed to have at least ETH_ZLEN (60) bytes of
* data by the call to skb_put_padto() above this is safe with
* respect to both the length of the first DMA descriptor (len)
* overflowing the available data and the length of the second DMA
* descriptor (skb->len - len) being negative.
*/
if (len == 0)
len = DPTR_ALIGN;
memcpy(buffer, skb->data, len);
dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
if (dma_mapping_error(ndev->dev.parent, dma_addr))

View File

@ -3326,9 +3326,9 @@ int stmmac_dvr_probe(struct device *device,
(priv->plat->maxmtu >= ndev->min_mtu))
ndev->max_mtu = priv->plat->maxmtu;
else if (priv->plat->maxmtu < ndev->min_mtu)
netdev_warn(priv->dev,
"%s: warning: maxmtu having invalid value (%d)\n",
__func__, priv->plat->maxmtu);
dev_warn(priv->device,
"%s: warning: maxmtu having invalid value (%d)\n",
__func__, priv->plat->maxmtu);
if (flow_ctrl)
priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
@ -3340,7 +3340,8 @@ int stmmac_dvr_probe(struct device *device,
*/
if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
priv->use_riwt = 1;
netdev_info(priv->dev, "Enable RX Mitigation via HW Watchdog Timer\n");
dev_info(priv->device,
"Enable RX Mitigation via HW Watchdog Timer\n");
}
netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
@ -3366,17 +3367,17 @@ int stmmac_dvr_probe(struct device *device,
/* MDIO bus Registration */
ret = stmmac_mdio_register(ndev);
if (ret < 0) {
netdev_err(priv->dev,
"%s: MDIO bus (id: %d) registration failed",
__func__, priv->plat->bus_id);
dev_err(priv->device,
"%s: MDIO bus (id: %d) registration failed",
__func__, priv->plat->bus_id);
goto error_mdio_register;
}
}
ret = register_netdev(ndev);
if (ret) {
netdev_err(priv->dev, "%s: ERROR %i registering the device\n",
__func__, ret);
dev_err(priv->device, "%s: ERROR %i registering the device\n",
__func__, ret);
goto error_netdev_register;
}

View File

@ -1210,7 +1210,7 @@ int cpmac_init(void)
goto fail_alloc;
}
#warning FIXME: unhardcode gpio&reset bits
/* FIXME: unhardcode gpio&reset bits */
ar7_gpio_disable(26);
ar7_gpio_disable(27);
ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);

View File

@ -659,6 +659,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
* policy filters on the host). Deliver these via the VF
* interface in the guest.
*/
rcu_read_lock();
vf_netdev = rcu_dereference(net_device_ctx->vf_netdev);
if (vf_netdev && (vf_netdev->flags & IFF_UP))
net = vf_netdev;
@ -667,6 +668,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);
if (unlikely(!skb)) {
++net->stats.rx_dropped;
rcu_read_unlock();
return NVSP_STAT_FAIL;
}
@ -696,6 +698,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
* TODO - use NAPI?
*/
netif_rx(skb);
rcu_read_unlock();
return 0;
}

View File

@ -1715,9 +1715,9 @@ static int at86rf230_probe(struct spi_device *spi)
/* Reset */
if (gpio_is_valid(rstn)) {
udelay(1);
gpio_set_value(rstn, 0);
gpio_set_value_cansleep(rstn, 0);
udelay(1);
gpio_set_value(rstn, 1);
gpio_set_value_cansleep(rstn, 1);
usleep_range(120, 240);
}

View File

@ -117,13 +117,26 @@ static int atusb_read_reg(struct atusb *atusb, uint8_t reg)
{
struct usb_device *usb_dev = atusb->usb_dev;
int ret;
uint8_t *buffer;
uint8_t value;
buffer = kmalloc(1, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
dev_dbg(&usb_dev->dev, "atusb: reg = 0x%x\n", reg);
ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
0, reg, &value, 1, 1000);
return ret >= 0 ? value : ret;
0, reg, buffer, 1, 1000);
if (ret >= 0) {
value = buffer[0];
kfree(buffer);
return value;
} else {
kfree(buffer);
return ret;
}
}
static int atusb_write_subreg(struct atusb *atusb, uint8_t reg, uint8_t mask,
@ -549,13 +562,6 @@ static int
atusb_set_frame_retries(struct ieee802154_hw *hw, s8 retries)
{
struct atusb *atusb = hw->priv;
struct device *dev = &atusb->usb_dev->dev;
if (atusb->fw_ver_maj == 0 && atusb->fw_ver_min < 3) {
dev_info(dev, "Automatic frame retransmission is only available from "
"firmware version 0.3. Please update if you want this feature.");
return -EINVAL;
}
return atusb_write_subreg(atusb, SR_MAX_FRAME_RETRIES, retries);
}
@ -608,9 +614,13 @@ static const struct ieee802154_ops atusb_ops = {
static int atusb_get_and_show_revision(struct atusb *atusb)
{
struct usb_device *usb_dev = atusb->usb_dev;
unsigned char buffer[3];
unsigned char *buffer;
int ret;
buffer = kmalloc(3, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
/* Get a couple of the ATMega Firmware values */
ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0,
@ -631,15 +641,20 @@ static int atusb_get_and_show_revision(struct atusb *atusb)
dev_info(&usb_dev->dev, "Please update to version 0.2 or newer");
}
kfree(buffer);
return ret;
}
static int atusb_get_and_show_build(struct atusb *atusb)
{
struct usb_device *usb_dev = atusb->usb_dev;
char build[ATUSB_BUILD_SIZE + 1];
char *build;
int ret;
build = kmalloc(ATUSB_BUILD_SIZE + 1, GFP_KERNEL);
if (!build)
return -ENOMEM;
ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0,
build, ATUSB_BUILD_SIZE, 1000);
@ -648,6 +663,7 @@ static int atusb_get_and_show_build(struct atusb *atusb)
dev_info(&usb_dev->dev, "Firmware: build %s\n", build);
}
kfree(build);
return ret;
}
@ -698,7 +714,7 @@ fail:
static int atusb_set_extended_addr(struct atusb *atusb)
{
struct usb_device *usb_dev = atusb->usb_dev;
unsigned char buffer[IEEE802154_EXTENDED_ADDR_LEN];
unsigned char *buffer;
__le64 extended_addr;
u64 addr;
int ret;
@ -710,12 +726,20 @@ static int atusb_set_extended_addr(struct atusb *atusb)
return 0;
}
buffer = kmalloc(IEEE802154_EXTENDED_ADDR_LEN, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
/* Firmware is new enough so we fetch the address from EEPROM */
ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
ATUSB_EUI64_READ, ATUSB_REQ_FROM_DEV, 0, 0,
buffer, IEEE802154_EXTENDED_ADDR_LEN, 1000);
if (ret < 0)
dev_err(&usb_dev->dev, "failed to fetch extended address\n");
if (ret < 0) {
dev_err(&usb_dev->dev, "failed to fetch extended address, random address set\n");
ieee802154_random_extended_addr(&atusb->hw->phy->perm_extended_addr);
kfree(buffer);
return ret;
}
memcpy(&extended_addr, buffer, IEEE802154_EXTENDED_ADDR_LEN);
/* Check if read address is not empty and the unicast bit is set correctly */
@ -729,6 +753,7 @@ static int atusb_set_extended_addr(struct atusb *atusb)
&addr);
}
kfree(buffer);
return ret;
}
@ -770,8 +795,7 @@ static int atusb_probe(struct usb_interface *interface,
hw->parent = &usb_dev->dev;
hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT |
IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS |
IEEE802154_HW_FRAME_RETRIES;
IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS;
hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL |
WPAN_PHY_FLAG_CCA_MODE;
@ -800,6 +824,9 @@ static int atusb_probe(struct usb_interface *interface,
atusb_get_and_show_build(atusb);
atusb_set_extended_addr(atusb);
if (atusb->fw_ver_maj >= 0 && atusb->fw_ver_min >= 3)
hw->flags |= IEEE802154_HW_FRAME_RETRIES;
ret = atusb_get_and_clear_error(atusb);
if (ret) {
dev_err(&atusb->usb_dev->dev,

View File

@ -132,12 +132,16 @@ static int dp83867_of_init(struct phy_device *phydev)
ret = of_property_read_u32(of_node, "ti,rx-internal-delay",
&dp83867->rx_id_delay);
if (ret)
if (ret &&
(phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID))
return ret;
ret = of_property_read_u32(of_node, "ti,tx-internal-delay",
&dp83867->tx_id_delay);
if (ret)
if (ret &&
(phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID))
return ret;
return of_property_read_u32(of_node, "ti,fifo-depth",

View File

@ -1730,7 +1730,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc)
u8 checksum = CHECKSUM_NONE;
u32 opts2, opts3;
if (tp->version == RTL_VER_01 || tp->version == RTL_VER_02)
if (!(tp->netdev->features & NETIF_F_RXCSUM))
goto return_result;
opts2 = le32_to_cpu(rx_desc->opts2);
@ -4356,6 +4356,11 @@ static int rtl8152_probe(struct usb_interface *intf,
NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
if (tp->version == RTL_VER_01) {
netdev->features &= ~NETIF_F_RXCSUM;
netdev->hw_features &= ~NETIF_F_RXCSUM;
}
netdev->ethtool_ops = &ops;
netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);

View File

@ -1798,7 +1798,7 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev,
struct vxlan_sock *sock4,
struct sk_buff *skb, int oif, u8 tos,
__be32 daddr, __be32 *saddr,
__be32 daddr, __be32 *saddr, __be16 dport, __be16 sport,
struct dst_cache *dst_cache,
const struct ip_tunnel_info *info)
{
@ -1824,6 +1824,8 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device
fl4.flowi4_proto = IPPROTO_UDP;
fl4.daddr = daddr;
fl4.saddr = *saddr;
fl4.fl4_dport = dport;
fl4.fl4_sport = sport;
rt = ip_route_output_key(vxlan->net, &fl4);
if (likely(!IS_ERR(rt))) {
@ -1851,6 +1853,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
__be32 label,
const struct in6_addr *daddr,
struct in6_addr *saddr,
__be16 dport, __be16 sport,
struct dst_cache *dst_cache,
const struct ip_tunnel_info *info)
{
@ -1877,6 +1880,8 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
fl6.flowi6_mark = skb->mark;
fl6.flowi6_proto = IPPROTO_UDP;
fl6.fl6_dport = dport;
fl6.fl6_sport = sport;
err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
sock6->sock->sk,
@ -2068,6 +2073,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
rdst ? rdst->remote_ifindex : 0, tos,
dst->sin.sin_addr.s_addr,
&src->sin.sin_addr.s_addr,
dst_port, src_port,
dst_cache, info);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
@ -2104,6 +2110,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
rdst ? rdst->remote_ifindex : 0, tos,
label, &dst->sin6.sin6_addr,
&src->sin6.sin6_addr,
dst_port, src_port,
dst_cache, info);
if (IS_ERR(ndst)) {
err = PTR_ERR(ndst);
@ -2430,7 +2437,7 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos,
info->key.u.ipv4.dst,
&info->key.u.ipv4.src, NULL, info);
&info->key.u.ipv4.src, dport, sport, NULL, info);
if (IS_ERR(rt))
return PTR_ERR(rt);
ip_rt_put(rt);
@ -2441,7 +2448,7 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos,
info->key.label, &info->key.u.ipv6.dst,
&info->key.u.ipv6.src, NULL, info);
&info->key.u.ipv6.src, dport, sport, NULL, info);
if (IS_ERR(ndst))
return PTR_ERR(ndst);
dst_release(ndst);

View File

@ -216,7 +216,7 @@ u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
int bpf_prog_calc_digest(struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);

View File

@ -57,6 +57,8 @@ struct bpf_prog_aux;
/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK 512
#define BPF_TAG_SIZE 8
/* Helper macros for filter block array initializers. */
/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
@ -408,7 +410,7 @@ struct bpf_prog {
kmemcheck_bitfield_end(meta);
enum bpf_prog_type type; /* Type of BPF program */
u32 len; /* Number of filter blocks */
u32 digest[SHA_DIGEST_WORDS]; /* Program digest */
u8 tag[BPF_TAG_SIZE];
struct bpf_prog_aux *aux; /* Auxiliary fields */
struct sock_fprog_kern *orig_prog; /* Original BPF program */
unsigned int (*bpf_func)(const void *ctx,
@ -519,7 +521,7 @@ static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
return prog->len * sizeof(struct bpf_insn);
}
static inline u32 bpf_prog_digest_scratch_size(const struct bpf_prog *prog)
static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
{
return round_up(bpf_prog_insn_size(prog) +
sizeof(__be64) + 1, SHA_MESSAGE_BYTES);

View File

@ -62,8 +62,13 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb)
/* TCP Fast Open Cookie as stored in memory */
struct tcp_fastopen_cookie {
union {
u8 val[TCP_FASTOPEN_COOKIE_MAX];
#if IS_ENABLED(CONFIG_IPV6)
struct in6_addr addr;
#endif
};
s8 len;
u8 val[TCP_FASTOPEN_COOKIE_MAX];
bool exp; /* In RFC6994 experimental option format */
};

View File

@ -1772,7 +1772,9 @@ enum nl80211_commands {
*
* @NL80211_ATTR_OPMODE_NOTIF: Operating mode field from Operating Mode
* Notification Element based on association request when used with
* %NL80211_CMD_NEW_STATION; u8 attribute.
* %NL80211_CMD_NEW_STATION or %NL80211_CMD_SET_STATION (only when
* %NL80211_FEATURE_FULL_AP_CLIENT_STATE is supported, or with TDLS);
* u8 attribute.
*
* @NL80211_ATTR_VENDOR_ID: The vendor ID, either a 24-bit OUI or, if
* %NL80211_VENDOR_ID_IS_LINUX is set, a special Linux ID (not used yet)

View File

@ -397,7 +397,7 @@ enum {
TCA_BPF_NAME,
TCA_BPF_FLAGS,
TCA_BPF_FLAGS_GEN,
TCA_BPF_DIGEST,
TCA_BPF_TAG,
__TCA_BPF_MAX,
};

View File

@ -27,7 +27,7 @@ enum {
TCA_ACT_BPF_FD,
TCA_ACT_BPF_NAME,
TCA_ACT_BPF_PAD,
TCA_ACT_BPF_DIGEST,
TCA_ACT_BPF_TAG,
__TCA_ACT_BPF_MAX,
};
#define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1)

View File

@ -146,10 +146,11 @@ void __bpf_prog_free(struct bpf_prog *fp)
vfree(fp);
}
int bpf_prog_calc_digest(struct bpf_prog *fp)
int bpf_prog_calc_tag(struct bpf_prog *fp)
{
const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
u32 raw_size = bpf_prog_digest_scratch_size(fp);
u32 raw_size = bpf_prog_tag_scratch_size(fp);
u32 digest[SHA_DIGEST_WORDS];
u32 ws[SHA_WORKSPACE_WORDS];
u32 i, bsize, psize, blocks;
struct bpf_insn *dst;
@ -162,7 +163,7 @@ int bpf_prog_calc_digest(struct bpf_prog *fp)
if (!raw)
return -ENOMEM;
sha_init(fp->digest);
sha_init(digest);
memset(ws, 0, sizeof(ws));
/* We need to take out the map fd for the digest calculation
@ -204,13 +205,14 @@ int bpf_prog_calc_digest(struct bpf_prog *fp)
*bits = cpu_to_be64((psize - 1) << 3);
while (blocks--) {
sha_transform(fp->digest, todo, ws);
sha_transform(digest, todo, ws);
todo += SHA_MESSAGE_BYTES;
}
result = (__force __be32 *)fp->digest;
result = (__force __be32 *)digest;
for (i = 0; i < SHA_DIGEST_WORDS; i++)
result[i] = cpu_to_be32(fp->digest[i]);
result[i] = cpu_to_be32(digest[i]);
memcpy(fp->tag, result, sizeof(fp->tag));
vfree(raw);
return 0;

View File

@ -688,17 +688,17 @@ static int bpf_prog_release(struct inode *inode, struct file *filp)
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
const struct bpf_prog *prog = filp->private_data;
char prog_digest[sizeof(prog->digest) * 2 + 1] = { };
char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
bin2hex(prog_digest, prog->digest, sizeof(prog->digest));
bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
seq_printf(m,
"prog_type:\t%u\n"
"prog_jited:\t%u\n"
"prog_digest:\t%s\n"
"prog_tag:\t%s\n"
"memlock:\t%llu\n",
prog->type,
prog->jited,
prog_digest,
prog_tag,
prog->pages * 1ULL << PAGE_SHIFT);
}
#endif

View File

@ -2936,7 +2936,7 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
int insn_cnt = env->prog->len;
int i, j, err;
err = bpf_prog_calc_digest(env->prog);
err = bpf_prog_calc_tag(env->prog);
if (err)
return err;

View File

@ -264,7 +264,7 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
{
ax25_clear_queues(ax25);
if (!sock_flag(ax25->sk, SOCK_DESTROY))
if (!ax25->sk || !sock_flag(ax25->sk, SOCK_DESTROY))
ax25_stop_heartbeat(ax25);
ax25_stop_t1timer(ax25);
ax25_stop_t2timer(ax25);

View File

@ -1279,8 +1279,9 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
goto nla_put_failure;
#endif
if (fi->fib_nh->nh_lwtstate)
lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate);
if (fi->fib_nh->nh_lwtstate &&
lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate) < 0)
goto nla_put_failure;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
if (fi->fib_nhs > 1) {
@ -1316,8 +1317,10 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
goto nla_put_failure;
#endif
if (nh->nh_lwtstate)
lwtunnel_fill_encap(skb, nh->nh_lwtstate);
if (nh->nh_lwtstate &&
lwtunnel_fill_encap(skb, nh->nh_lwtstate) < 0)
goto nla_put_failure;
/* length of rtnetlink header + attributes */
rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
} endfor_nexthops(fi);

View File

@ -2472,7 +2472,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id,
r->rtm_dst_len = 32;
r->rtm_src_len = 0;
r->rtm_tos = fl4->flowi4_tos;
r->rtm_table = table_id;
r->rtm_table = table_id < 256 ? table_id : RT_TABLE_COMPAT;
if (nla_put_u32(skb, RTA_TABLE, table_id))
goto nla_put_failure;
r->rtm_type = rt->rt_type;

View File

@ -113,7 +113,7 @@ static bool tcp_fastopen_cookie_gen(struct request_sock *req,
struct tcp_fastopen_cookie tmp;
if (__tcp_fastopen_cookie_gen(&ip6h->saddr, &tmp)) {
struct in6_addr *buf = (struct in6_addr *) tmp.val;
struct in6_addr *buf = &tmp.addr;
int i;
for (i = 0; i < 4; i++)

View File

@ -1108,7 +1108,7 @@ route_lookup:
t->parms.name);
goto tx_err_dst_release;
}
mtu = dst_mtu(dst) - psh_hlen;
mtu = dst_mtu(dst) - psh_hlen - t->tun_hlen;
if (encap_limit >= 0) {
max_headroom += 8;
mtu -= 8;
@ -1117,7 +1117,7 @@ route_lookup:
mtu = IPV6_MIN_MTU;
if (skb_dst(skb) && !t->parms.collect_md)
skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
if (skb->len > mtu && !skb_is_gso(skb)) {
if (skb->len - t->tun_hlen > mtu && !skb_is_gso(skb)) {
*pmtu = mtu;
err = -EMSGSIZE;
goto tx_err_dst_release;

View File

@ -81,7 +81,7 @@ static void mld_gq_timer_expire(unsigned long data);
static void mld_ifc_timer_expire(unsigned long data);
static void mld_ifc_event(struct inet6_dev *idev);
static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *addr);
static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
static void mld_clear_delrec(struct inet6_dev *idev);
static bool mld_in_v1_mode(const struct inet6_dev *idev);
static int sf_setstate(struct ifmcaddr6 *pmc);
@ -692,9 +692,9 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc)
dev_mc_del(dev, buf);
}
if (mc->mca_flags & MAF_NOREPORT)
goto done;
spin_unlock_bh(&mc->mca_lock);
if (mc->mca_flags & MAF_NOREPORT)
return;
if (!mc->idev->dead)
igmp6_leave_group(mc);
@ -702,8 +702,6 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc)
spin_lock_bh(&mc->mca_lock);
if (del_timer(&mc->mca_timer))
atomic_dec(&mc->mca_refcnt);
done:
ip6_mc_clear_src(mc);
spin_unlock_bh(&mc->mca_lock);
}
@ -748,10 +746,11 @@ static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
spin_unlock_bh(&idev->mc_lock);
}
static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca)
static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
{
struct ifmcaddr6 *pmc, *pmc_prev;
struct ip6_sf_list *psf, *psf_next;
struct ip6_sf_list *psf;
struct in6_addr *pmca = &im->mca_addr;
spin_lock_bh(&idev->mc_lock);
pmc_prev = NULL;
@ -768,14 +767,20 @@ static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca)
}
spin_unlock_bh(&idev->mc_lock);
spin_lock_bh(&im->mca_lock);
if (pmc) {
for (psf = pmc->mca_tomb; psf; psf = psf_next) {
psf_next = psf->sf_next;
kfree(psf);
im->idev = pmc->idev;
im->mca_crcount = idev->mc_qrv;
im->mca_sfmode = pmc->mca_sfmode;
if (pmc->mca_sfmode == MCAST_INCLUDE) {
im->mca_tomb = pmc->mca_tomb;
im->mca_sources = pmc->mca_sources;
for (psf = im->mca_sources; psf; psf = psf->sf_next)
psf->sf_crcount = im->mca_crcount;
}
in6_dev_put(pmc->idev);
kfree(pmc);
}
spin_unlock_bh(&im->mca_lock);
}
static void mld_clear_delrec(struct inet6_dev *idev)
@ -904,7 +909,7 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
mca_get(mc);
write_unlock_bh(&idev->lock);
mld_del_delrec(idev, &mc->mca_addr);
mld_del_delrec(idev, mc);
igmp6_group_added(mc);
ma_put(mc);
return 0;
@ -927,6 +932,7 @@ int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
write_unlock_bh(&idev->lock);
igmp6_group_dropped(ma);
ip6_mc_clear_src(ma);
ma_put(ma);
return 0;
@ -2501,15 +2507,17 @@ void ipv6_mc_down(struct inet6_dev *idev)
/* Withdraw multicast list */
read_lock_bh(&idev->lock);
mld_ifc_stop_timer(idev);
mld_gq_stop_timer(idev);
mld_dad_stop_timer(idev);
for (i = idev->mc_list; i; i = i->next)
igmp6_group_dropped(i);
read_unlock_bh(&idev->lock);
mld_clear_delrec(idev);
/* Should stop timer after group drop. or we will
* start timer again in mld_ifc_event()
*/
mld_ifc_stop_timer(idev);
mld_gq_stop_timer(idev);
mld_dad_stop_timer(idev);
read_unlock_bh(&idev->lock);
}
static void ipv6_mc_reset(struct inet6_dev *idev)
@ -2531,8 +2539,10 @@ void ipv6_mc_up(struct inet6_dev *idev)
read_lock_bh(&idev->lock);
ipv6_mc_reset(idev);
for (i = idev->mc_list; i; i = i->next)
for (i = idev->mc_list; i; i = i->next) {
mld_del_delrec(idev, i);
igmp6_group_added(i);
}
read_unlock_bh(&idev->lock);
}
@ -2565,6 +2575,7 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev)
/* Deactivate timers */
ipv6_mc_down(idev);
mld_clear_delrec(idev);
/* Delete all-nodes address. */
/* We cannot call ipv6_dev_mc_dec() directly, our caller in
@ -2579,11 +2590,9 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev)
write_lock_bh(&idev->lock);
while ((i = idev->mc_list) != NULL) {
idev->mc_list = i->next;
write_unlock_bh(&idev->lock);
igmp6_group_dropped(i);
ma_put(i);
write_lock_bh(&idev->lock);
}
write_unlock_bh(&idev->lock);

View File

@ -3317,7 +3317,8 @@ static int rt6_fill_node(struct net *net,
if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
goto nla_put_failure;
lwtunnel_fill_encap(skb, rt->dst.lwtstate);
if (lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0)
goto nla_put_failure;
nlmsg_end(skb, nlh);
return 0;

View File

@ -400,7 +400,7 @@ static int seg6_hmac_init_algo(void)
*p_tfm = tfm;
}
p_tfm = this_cpu_ptr(algo->tfms);
p_tfm = raw_cpu_ptr(algo->tfms);
tfm = *p_tfm;
shsize = sizeof(*shash) + crypto_shash_descsize(tfm);

View File

@ -265,7 +265,9 @@ int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
#ifdef CONFIG_DST_CACHE
preempt_disable();
dst = dst_cache_get(&slwt->cache);
preempt_enable();
#endif
if (unlikely(!dst)) {
@ -286,7 +288,9 @@ int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
}
#ifdef CONFIG_DST_CACHE
preempt_disable();
dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr);
preempt_enable();
#endif
}

View File

@ -231,9 +231,6 @@ ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata)
!(sta->sdata->bss && sta->sdata->bss == sdata->bss))
continue;
if (!sta->uploaded || !test_sta_flag(sta, WLAN_STA_ASSOC))
continue;
max_bw = max(max_bw, ieee80211_get_sta_bw(&sta->sta));
}
rcu_read_unlock();

View File

@ -6,6 +6,7 @@
* Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@ -1295,6 +1296,26 @@ static void ieee80211_iface_work(struct work_struct *work)
} else if (ieee80211_is_action(mgmt->frame_control) &&
mgmt->u.action.category == WLAN_CATEGORY_VHT) {
switch (mgmt->u.action.u.vht_group_notif.action_code) {
case WLAN_VHT_ACTION_OPMODE_NOTIF: {
struct ieee80211_rx_status *status;
enum nl80211_band band;
u8 opmode;
status = IEEE80211_SKB_RXCB(skb);
band = status->band;
opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode;
mutex_lock(&local->sta_mtx);
sta = sta_info_get_bss(sdata, mgmt->sa);
if (sta)
ieee80211_vht_handle_opmode(sdata, sta,
opmode,
band);
mutex_unlock(&local->sta_mtx);
break;
}
case WLAN_VHT_ACTION_GROUPID_MGMT:
ieee80211_process_mu_groups(sdata, mgmt);
break;

View File

@ -913,12 +913,17 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
supp_ht = supp_ht || sband->ht_cap.ht_supported;
supp_vht = supp_vht || sband->vht_cap.vht_supported;
if (sband->ht_cap.ht_supported)
local->rx_chains =
max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs),
local->rx_chains);
if (!sband->ht_cap.ht_supported)
continue;
/* TODO: consider VHT for RX chains, hopefully it's the same */
local->rx_chains =
max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs),
local->rx_chains);
/* no need to mask, SM_PS_DISABLED has all bits set */
sband->ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
IEEE80211_HT_CAP_SM_PS_SHIFT;
}
/* if low-level driver supports AP, we also support VLAN */

View File

@ -40,6 +40,8 @@ void rate_control_rate_init(struct sta_info *sta)
ieee80211_sta_set_rx_nss(sta);
ieee80211_recalc_min_chandef(sta->sdata);
if (!ref)
return;

View File

@ -2472,7 +2472,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
if (!ifmsh->mshcfg.dot11MeshForwarding)
goto out;
fwd_skb = skb_copy_expand(skb, local->tx_headroom, 0, GFP_ATOMIC);
fwd_skb = skb_copy_expand(skb, local->tx_headroom +
sdata->encrypt_headroom, 0, GFP_ATOMIC);
if (!fwd_skb) {
net_info_ratelimited("%s: failed to clone mesh frame\n",
sdata->name);
@ -2880,17 +2881,10 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
switch (mgmt->u.action.u.vht_opmode_notif.action_code) {
case WLAN_VHT_ACTION_OPMODE_NOTIF: {
u8 opmode;
/* verify opmode is present */
if (len < IEEE80211_MIN_ACTION_SIZE + 2)
goto invalid;
opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode;
ieee80211_vht_handle_opmode(rx->sdata, rx->sta,
opmode, status->band);
goto handled;
goto queue;
}
case WLAN_VHT_ACTION_GROUPID_MGMT: {
if (len < IEEE80211_MIN_ACTION_SIZE + 25)
@ -3942,21 +3936,31 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
u64_stats_update_end(&stats->syncp);
if (fast_rx->internal_forward) {
struct sta_info *dsta = sta_info_get(rx->sdata, skb->data);
struct sk_buff *xmit_skb = NULL;
bool multicast = is_multicast_ether_addr(skb->data);
if (dsta) {
if (multicast) {
xmit_skb = skb_copy(skb, GFP_ATOMIC);
} else if (sta_info_get(rx->sdata, skb->data)) {
xmit_skb = skb;
skb = NULL;
}
if (xmit_skb) {
/*
* Send to wireless media and increase priority by 256
* to keep the received priority instead of
* reclassifying the frame (see cfg80211_classify8021d).
*/
skb->priority += 256;
skb->protocol = htons(ETH_P_802_3);
skb_reset_network_header(skb);
skb_reset_mac_header(skb);
dev_queue_xmit(skb);
return true;
xmit_skb->priority += 256;
xmit_skb->protocol = htons(ETH_P_802_3);
skb_reset_network_header(xmit_skb);
skb_reset_mac_header(xmit_skb);
dev_queue_xmit(xmit_skb);
}
if (!skb)
return true;
}
/* deliver to local stack */

View File

@ -1501,8 +1501,8 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
/* This will evaluate to 1, 3, 5 or 7. */
for (ac = IEEE80211_AC_VO; ac < IEEE80211_NUM_ACS; ac++)
if (ignored_acs & BIT(ac))
continue;
if (!(ignored_acs & ieee80211_ac_to_qos_mask[ac]))
break;
tid = 7 - 2 * ac;
ieee80211_send_null_response(sta, tid, reason, true, false);

View File

@ -1243,7 +1243,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
struct ieee80211_vif *vif,
struct ieee80211_sta *pubsta,
struct sta_info *sta,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@ -1257,10 +1257,13 @@ static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
if (!ieee80211_is_data(hdr->frame_control))
return NULL;
if (pubsta) {
if (sta) {
u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
txq = pubsta->txq[tid];
if (!sta->uploaded)
return NULL;
txq = sta->sta.txq[tid];
} else if (vif) {
txq = vif->txq;
}
@ -1503,23 +1506,17 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local,
struct fq *fq = &local->fq;
struct ieee80211_vif *vif;
struct txq_info *txqi;
struct ieee80211_sta *pubsta;
if (!local->ops->wake_tx_queue ||
sdata->vif.type == NL80211_IFTYPE_MONITOR)
return false;
if (sta && sta->uploaded)
pubsta = &sta->sta;
else
pubsta = NULL;
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
sdata = container_of(sdata->bss,
struct ieee80211_sub_if_data, u.ap);
vif = &sdata->vif;
txqi = ieee80211_get_txq(local, vif, pubsta, skb);
txqi = ieee80211_get_txq(local, vif, sta, skb);
if (!txqi)
return false;

View File

@ -527,8 +527,10 @@ void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
u32 changed = __ieee80211_vht_handle_opmode(sdata, sta, opmode, band);
if (changed > 0)
if (changed > 0) {
ieee80211_recalc_min_chandef(sdata);
rate_control_rate_update(local, sband, sta, changed);
}
}
void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,

View File

@ -514,7 +514,7 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
int hooknum, nh_off, err = NF_ACCEPT;
nh_off = skb_network_offset(skb);
skb_pull(skb, nh_off);
skb_pull_rcsum(skb, nh_off);
/* See HOOK2MANIP(). */
if (maniptype == NF_NAT_MANIP_SRC)
@ -579,6 +579,7 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
err = nf_nat_packet(ct, ctinfo, hooknum, skb);
push:
skb_push(skb, nh_off);
skb_postpush_rcsum(skb, skb->data, nh_off);
return err;
}
@ -886,7 +887,7 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
/* The conntrack module expects to be working at L3. */
nh_ofs = skb_network_offset(skb);
skb_pull(skb, nh_ofs);
skb_pull_rcsum(skb, nh_ofs);
if (key->ip.frag != OVS_FRAG_TYPE_NONE) {
err = handle_fragments(net, key, info->zone.id, skb);
@ -900,6 +901,7 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
err = ovs_ct_lookup(net, key, info, skb);
skb_push(skb, nh_ofs);
skb_postpush_rcsum(skb, skb->data, nh_ofs);
if (err)
kfree_skb(skb);
return err;

View File

@ -900,8 +900,6 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
goto err;
}
act->order = i;
if (event == RTM_GETACTION)
act->tcfa_refcnt++;
list_add_tail(&act->list, &actions);
}
@ -914,7 +912,8 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
return ret;
}
err:
tcf_action_destroy(&actions, 0);
if (event != RTM_GETACTION)
tcf_action_destroy(&actions, 0);
return ret;
}

View File

@ -123,12 +123,11 @@ static int tcf_bpf_dump_ebpf_info(const struct tcf_bpf *prog,
nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name))
return -EMSGSIZE;
nla = nla_reserve(skb, TCA_ACT_BPF_DIGEST,
sizeof(prog->filter->digest));
nla = nla_reserve(skb, TCA_ACT_BPF_TAG, sizeof(prog->filter->tag));
if (nla == NULL)
return -EMSGSIZE;
memcpy(nla_data(nla), prog->filter->digest, nla_len(nla));
memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));
return 0;
}

View File

@ -555,11 +555,11 @@ static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
return -EMSGSIZE;
nla = nla_reserve(skb, TCA_BPF_DIGEST, sizeof(prog->filter->digest));
nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
if (nla == NULL)
return -EMSGSIZE;
memcpy(nla_data(nla), prog->filter->digest, nla_len(nla));
memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));
return 0;
}

View File

@ -169,7 +169,7 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
/* Send response, if necessary */
if (respond && (mtyp == DSC_REQ_MSG)) {
rskb = tipc_buf_acquire(MAX_H_SIZE);
rskb = tipc_buf_acquire(MAX_H_SIZE, GFP_ATOMIC);
if (!rskb)
return;
tipc_disc_init_msg(net, rskb, DSC_RESP_MSG, bearer);
@ -278,7 +278,7 @@ int tipc_disc_create(struct net *net, struct tipc_bearer *b,
req = kmalloc(sizeof(*req), GFP_ATOMIC);
if (!req)
return -ENOMEM;
req->buf = tipc_buf_acquire(MAX_H_SIZE);
req->buf = tipc_buf_acquire(MAX_H_SIZE, GFP_ATOMIC);
if (!req->buf) {
kfree(req);
return -ENOMEM;

View File

@ -1395,7 +1395,7 @@ tnl:
msg_set_seqno(hdr, seqno++);
pktlen = msg_size(hdr);
msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
if (!tnlskb) {
pr_warn("%sunable to send packet\n", link_co_err);
return;

View File

@ -58,12 +58,12 @@ static unsigned int align(unsigned int i)
* NOTE: Headroom is reserved to allow prepending of a data link header.
* There may also be unrequested tailroom present at the buffer's end.
*/
struct sk_buff *tipc_buf_acquire(u32 size)
struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
{
struct sk_buff *skb;
unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
skb = alloc_skb_fclone(buf_size, GFP_ATOMIC);
skb = alloc_skb_fclone(buf_size, gfp);
if (skb) {
skb_reserve(skb, BUF_HEADROOM);
skb_put(skb, size);
@ -95,7 +95,7 @@ struct sk_buff *tipc_msg_create(uint user, uint type,
struct tipc_msg *msg;
struct sk_buff *buf;
buf = tipc_buf_acquire(hdr_sz + data_sz);
buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
if (unlikely(!buf))
return NULL;
@ -261,7 +261,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
/* No fragmentation needed? */
if (likely(msz <= pktmax)) {
skb = tipc_buf_acquire(msz);
skb = tipc_buf_acquire(msz, GFP_KERNEL);
if (unlikely(!skb))
return -ENOMEM;
skb_orphan(skb);
@ -282,7 +282,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
msg_set_importance(&pkthdr, msg_importance(mhdr));
/* Prepare first fragment */
skb = tipc_buf_acquire(pktmax);
skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
if (!skb)
return -ENOMEM;
skb_orphan(skb);
@ -313,7 +313,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
pktsz = drem + INT_H_SIZE;
else
pktsz = pktmax;
skb = tipc_buf_acquire(pktsz);
skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
if (!skb) {
rc = -ENOMEM;
goto error;
@ -448,7 +448,7 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
if (msz > (max / 2))
return false;
_skb = tipc_buf_acquire(max);
_skb = tipc_buf_acquire(max, GFP_ATOMIC);
if (!_skb)
return false;
@ -496,7 +496,7 @@ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
/* Never return SHORT header; expand by replacing buffer if necessary */
if (msg_short(hdr)) {
*skb = tipc_buf_acquire(BASIC_H_SIZE + dlen);
*skb = tipc_buf_acquire(BASIC_H_SIZE + dlen, GFP_ATOMIC);
if (!*skb)
goto exit;
memcpy((*skb)->data + BASIC_H_SIZE, msg_data(hdr), dlen);

View File

@ -820,7 +820,7 @@ static inline bool msg_is_reset(struct tipc_msg *hdr)
return (msg_user(hdr) == LINK_PROTOCOL) && (msg_type(hdr) == RESET_MSG);
}
struct sk_buff *tipc_buf_acquire(u32 size);
struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp);
bool tipc_msg_validate(struct sk_buff *skb);
bool tipc_msg_reverse(u32 own_addr, struct sk_buff **skb, int err);
void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type,

View File

@ -69,7 +69,7 @@ static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
u32 dest)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size);
struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
struct tipc_msg *msg;
if (buf != NULL) {

View File

@ -4615,6 +4615,15 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
break;
}
/*
* Older kernel versions ignored this attribute entirely, so don't
* reject attempts to update it but mark it as unused instead so the
* driver won't look at the data.
*/
if (statype != CFG80211_STA_AP_CLIENT_UNASSOC &&
statype != CFG80211_STA_TDLS_PEER_SETUP)
params->opmode_notif_used = false;
return 0;
}
EXPORT_SYMBOL(cfg80211_check_station_change);
@ -4854,6 +4863,12 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
params.local_pm = pm;
}
if (info->attrs[NL80211_ATTR_OPMODE_NOTIF]) {
params.opmode_notif_used = true;
params.opmode_notif =
nla_get_u8(info->attrs[NL80211_ATTR_OPMODE_NOTIF]);
}
/* Include parameters for TDLS peer (will check later) */
err = nl80211_set_station_tdls(info, &params);
if (err)