Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix skb leak in mac802154, from Martin Townsend.

 2) Use select not depends on NF_NAT for NFT_NAT, from Pablo Neira
    Ayuso.

 3) Fix union initializer bogosity in vxlan, from Gerhard Stenzel.

 4) Fix RX checksum configuration in stmmac driver, from Giuseppe
    CAVALLARO.

 5) Fix TSO with non-accelerated VLANs in e1000, e1000e, bna, ehea,
    i40e, i40evf, mvneta, and qlge, from Vlad Yasevich.

 6) Fix capability checks in phy_init_eee(), from Giuseppe CAVALLARO.

 7) Try high order allocations more sanely for SKBs: specifically, if a
    high order allocation fails, fall back directly to zero order pages
    rather than iterating down one order at a time.  From Eric Dumazet.

 8) Fix a memory leak in openvswitch, from Li RongQing.

 9) amd-xgbe initializes wrong spinlock, from Thomas Lendacky.

10) RTNL locking was busted in setsockopt for anycast and multicast,
    fix from Sabrina Dubroca.

11) Fix peer address refcount leak in ipv6, from Nicolas Dichtel.

12) DocBook typo fixes, from Masanari Iida.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (101 commits)
  ipv6: restore the behavior of ipv6_sock_ac_drop()
  amd-xgbe: Enable interrupts for all management counters
  amd-xgbe: Treat certain counter registers as 64 bit
  greth: moved TX ring cleaning to NAPI rx poll func
  cnic : Cleanup CONFIG_IPV6 & VLAN check
  net: treewide: Fix typo found in DocBook/networking.xml
  bnx2x: Fix link problems for 1G SFP RJ45 module
  3c59x: avoid panic in boomerang_start_xmit when finding page address:
  netfilter: add explicit Kconfig for NETFILTER_XT_NAT
  ipv6: use addrconf_get_prefix_route() to remove peer addr
  ipv6: fix a refcnt leak with peer addr
  net-timestamp: only report sw timestamp if reporting bit is set
  drivers/net/fddi/skfp/h/skfbi.h: Remove useless PCI_BASE_2ND macros
  l2tp: fix race while getting PMTU on PPP pseudo-wire
  ipv6: fix rtnl locking in setsockopt for anycast and multicast
  VMXNET3: Check for map error in vmxnet3_set_mc
  openvswitch: distinguish between the dropped and consumed skb
  amd-xgbe: Fix initialization of the wrong spin lock
  openvswitch: fix a memory leak
  netfilter: fix missing dependencies in NETFILTER_XT_TARGET_LOG
  ...
commit b531f5dd9c
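A note on item 7 above: the change concerns the page-fragment allocator
behind sk_buff payloads. The sketch below is an illustration of the idea,
not the actual net/core/sock.c code; the helper name is made up, and the
gfp flags mirror what such an opportunistic high-order attempt typically
uses (assumptions, not a quote of the patch):

/* Try one high-order allocation cheaply; on failure fall straight
 * back to order-0 instead of retrying every intermediate order.
 */
static struct page *skb_frag_page_alloc(gfp_t prio, unsigned int *order)
{
	struct page *page;

	if (*order) {
		/* Assumed flags: keep the high-order attempt cheap
		 * (no retries) and quiet (no allocation-failure splat).
		 */
		gfp_t gfp = prio | __GFP_COMP | __GFP_NOWARN |
			    __GFP_NORETRY;

		page = alloc_pages(gfp, *order);
		if (page)
			return page;
		*order = 0;	/* skip orders N-1..1 entirely */
	}
	return alloc_pages(prio, 0);
}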
@@ -39,6 +39,10 @@ Optional properties:
  further clocks may be specified in derived bindings.
- clock-names: One name for each entry in the clocks property, the
  first one should be "stmmaceth".
- clk_ptp_ref: this is the PTP reference clock; if the PTP is
  available, this clock is used for programming the Timestamp Addend Register.
  If not passed then the system clock will be used and this is fine on some
  platforms.

Examples:
@@ -282,6 +282,7 @@ static const struct pci_device_id bcma_pci_bridge_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43227) },	/* 0xA8DB */
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, bcma_pci_bridge_tbl);
@@ -1089,6 +1089,30 @@ static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
	return err;
}

static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
				    u64 *reg_id)
{
	void *ib_flow;
	union ib_flow_spec *ib_spec;
	struct mlx4_dev *dev = to_mdev(qp->device)->dev;
	int err = 0;

	if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
		return 0; /* do nothing */

	ib_flow = flow_attr + 1;
	ib_spec = (union ib_flow_spec *)ib_flow;

	if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
		return 0; /* do nothing */

	err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
				    flow_attr->port, qp->qp_num,
				    MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
				    reg_id);
	return err;
}

static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
					   struct ib_flow_attr *flow_attr,
					   int domain)
@@ -1136,6 +1160,12 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
		i++;
	}

	if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
		err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]);
		if (err)
			goto err_free;
	}

	return &mflow->ibflow;

err_free:
@@ -1677,9 +1677,15 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
		}
	}

	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET)
	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
		context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
					  MLX4_IB_LINK_TYPE_ETH;
		if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
			/* set QP to receive both tunneled & non-tunneled packets */
			if (!(context->flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)))
				context->srqn = cpu_to_be32(7 << 28);
		}
	}

	if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
		int is_eth = rdma_port_get_link_layer(
@@ -2177,10 +2177,10 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			vp->tx_ring[entry].frag[i+1].addr =
					cpu_to_le32(pci_map_single(
						VORTEX_PCI(vp),
						(void *)skb_frag_address(frag),
						skb_frag_size(frag), PCI_DMA_TODEVICE));
					cpu_to_le32(skb_frag_dma_map(
						&VORTEX_PCI(vp)->dev,
						frag,
						frag->page_offset, frag->size, DMA_TO_DEVICE));

			if (i == skb_shinfo(skb)->nr_frags-1)
					vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG);
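The 3c59x hunk above replaces pci_map_single() on skb_frag_address() with
skb_frag_dma_map(): the latter maps the fragment's page directly, so a
highmem fragment with no kernel virtual address can no longer panic the
xmit path. A hedged, driver-agnostic sketch of the same pattern (helper
name is illustrative):

/* Map every fragment of an skb for DMA; skb_frag_dma_map() works on
 * the underlying page, so highmem frags need no kernel mapping.
 */
static int map_tx_frags(struct device *dev, struct sk_buff *skb,
			dma_addr_t *addrs)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		addrs[i] = skb_frag_dma_map(dev, frag, 0,
					    skb_frag_size(frag),
					    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, addrs[i]))
			return -ENOMEM;	/* caller unmaps addrs[0..i-1] */
	}
	return 0;
}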
@@ -123,6 +123,12 @@ static inline void greth_enable_tx(struct greth_private *greth)
	GRETH_REGORIN(greth->regs->control, GRETH_TXEN);
}

static inline void greth_enable_tx_and_irq(struct greth_private *greth)
{
	wmb(); /* BDs must be written to memory before enabling TX */
	GRETH_REGORIN(greth->regs->control, GRETH_TXEN | GRETH_TXI);
}

static inline void greth_disable_tx(struct greth_private *greth)
{
	GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN);
@@ -447,29 +453,30 @@ out:
	return err;
}

static inline u16 greth_num_free_bds(u16 tx_last, u16 tx_next)
{
	if (tx_next < tx_last)
		return (tx_last - tx_next) - 1;
	else
		return GRETH_TXBD_NUM - (tx_next - tx_last) - 1;
}

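greth_num_free_bds() above is standard circular-ring arithmetic that
always keeps one descriptor unused, so tx_last == tx_next unambiguously
means "empty". A quick user-space sanity check of the formula; the
GRETH_TXBD_NUM value of 128 is assumed for illustration:

#include <assert.h>
#include <stdint.h>

#define GRETH_TXBD_NUM 128	/* assumed ring size */

static uint16_t num_free_bds(uint16_t tx_last, uint16_t tx_next)
{
	if (tx_next < tx_last)
		return (tx_last - tx_next) - 1;
	else
		return GRETH_TXBD_NUM - (tx_next - tx_last) - 1;
}

int main(void)
{
	assert(num_free_bds(0, 0) == 127);  /* empty ring, one BD reserved */
	assert(num_free_bds(10, 5) == 4);   /* tx_next wrapped behind tx_last */
	assert(num_free_bds(5, 10) == 122); /* tx_last trails tx_next */
	return 0;
}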
static netdev_tx_t
greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
{
	struct greth_private *greth = netdev_priv(dev);
	struct greth_bd *bdp;
	u32 status = 0, dma_addr, ctrl;
	u32 status, dma_addr;
	int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
	unsigned long flags;
	u16 tx_last;

	nr_frags = skb_shinfo(skb)->nr_frags;
	tx_last = greth->tx_last;
	rmb(); /* tx_last is updated by the poll task */

	/* Clean TX Ring */
	greth_clean_tx_gbit(dev);

	if (greth->tx_free < nr_frags + 1) {
		spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
		ctrl = GRETH_REGLOAD(greth->regs->control);
		/* Enable TX IRQ only if not already in poll() routine */
		if (ctrl & GRETH_RXI)
			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
	if (greth_num_free_bds(tx_last, greth->tx_next) < nr_frags + 1) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&greth->devlock, flags);
		err = NETDEV_TX_BUSY;
		goto out;
	}
@@ -488,6 +495,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
	/* Linear buf */
	if (nr_frags != 0)
		status = GRETH_TXBD_MORE;
	else
		status = GRETH_BD_IE;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		status |= GRETH_TXBD_CSALL;
@@ -545,14 +554,12 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)

	/* Enable the descriptor chain by enabling the first descriptor */
	bdp = greth->tx_bd_base + greth->tx_next;
	greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
	greth->tx_next = curr_tx;
	greth->tx_free -= nr_frags + 1;

	wmb();
	greth_write_bd(&bdp->stat,
		       greth_read_bd(&bdp->stat) | GRETH_BD_EN);

	spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
	greth_enable_tx(greth);
	greth->tx_next = curr_tx;
	greth_enable_tx_and_irq(greth);
	spin_unlock_irqrestore(&greth->devlock, flags);

	return NETDEV_TX_OK;
@@ -648,7 +655,6 @@ static void greth_clean_tx(struct net_device *dev)
	if (greth->tx_free > 0) {
		netif_wake_queue(dev);
	}

}

static inline void greth_update_tx_stats(struct net_device *dev, u32 stat)
@@ -670,20 +676,22 @@ static void greth_clean_tx_gbit(struct net_device *dev)
{
	struct greth_private *greth;
	struct greth_bd *bdp, *bdp_last_frag;
	struct sk_buff *skb;
	struct sk_buff *skb = NULL;
	u32 stat;
	int nr_frags, i;
	u16 tx_last;

	greth = netdev_priv(dev);
	tx_last = greth->tx_last;

	while (greth->tx_free < GRETH_TXBD_NUM) {
	while (tx_last != greth->tx_next) {

		skb = greth->tx_skbuff[greth->tx_last];
		skb = greth->tx_skbuff[tx_last];

		nr_frags = skb_shinfo(skb)->nr_frags;

		/* We only clean fully completed SKBs */
		bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
		bdp_last_frag = greth->tx_bd_base + SKIP_TX(tx_last, nr_frags);

		GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
		mb();
@@ -692,14 +700,14 @@ static void greth_clean_tx_gbit(struct net_device *dev)
		if (stat & GRETH_BD_EN)
			break;

		greth->tx_skbuff[greth->tx_last] = NULL;
		greth->tx_skbuff[tx_last] = NULL;

		greth_update_tx_stats(dev, stat);
		dev->stats.tx_bytes += skb->len;

		bdp = greth->tx_bd_base + greth->tx_last;
		bdp = greth->tx_bd_base + tx_last;

		greth->tx_last = NEXT_TX(greth->tx_last);
		tx_last = NEXT_TX(tx_last);

		dma_unmap_single(greth->dev,
				 greth_read_bd(&bdp->addr),
@@ -708,21 +716,26 @@ static void greth_clean_tx_gbit(struct net_device *dev)

		for (i = 0; i < nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			bdp = greth->tx_bd_base + greth->tx_last;
			bdp = greth->tx_bd_base + tx_last;

			dma_unmap_page(greth->dev,
				       greth_read_bd(&bdp->addr),
				       skb_frag_size(frag),
				       DMA_TO_DEVICE);

			greth->tx_last = NEXT_TX(greth->tx_last);
			tx_last = NEXT_TX(tx_last);
		}
		greth->tx_free += nr_frags+1;
		dev_kfree_skb(skb);
	}
	if (skb) { /* skb is set only if the above while loop was entered */
		wmb();
		greth->tx_last = tx_last;

		if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1)))
			netif_wake_queue(dev);
		if (netif_queue_stopped(dev) &&
		    (greth_num_free_bds(tx_last, greth->tx_next) >
		    (MAX_SKB_FRAGS+1)))
			netif_wake_queue(dev);
	}
}

static int greth_rx(struct net_device *dev, int limit)
@@ -965,16 +978,12 @@ static int greth_poll(struct napi_struct *napi, int budget)
	greth = container_of(napi, struct greth_private, napi);

restart_txrx_poll:
	if (netif_queue_stopped(greth->netdev)) {
		if (greth->gbit_mac)
			greth_clean_tx_gbit(greth->netdev);
		else
			greth_clean_tx(greth->netdev);
	}

	if (greth->gbit_mac) {
		greth_clean_tx_gbit(greth->netdev);
		work_done += greth_rx_gbit(greth->netdev, budget - work_done);
	} else {
		if (netif_queue_stopped(greth->netdev))
			greth_clean_tx(greth->netdev);
		work_done += greth_rx(greth->netdev, budget - work_done);
	}

@@ -983,7 +992,8 @@ restart_txrx_poll:
		spin_lock_irqsave(&greth->devlock, flags);

		ctrl = GRETH_REGLOAD(greth->regs->control);
		if (netif_queue_stopped(greth->netdev)) {
		if ((greth->gbit_mac && (greth->tx_last != greth->tx_next)) ||
		    (!greth->gbit_mac && netif_queue_stopped(greth->netdev))) {
			GRETH_REGSAVE(greth->regs->control,
				      ctrl | GRETH_TXI | GRETH_RXI);
			mask = GRETH_INT_RX | GRETH_INT_RE |

@@ -107,7 +107,7 @@ struct greth_private {

	u16 tx_next;
	u16 tx_last;
	u16 tx_free;
	u16 tx_free; /* only used on 10/100Mbit */
	u16 rx_cur;

	struct greth_regs *regs;	/* Address of controller registers. */
@@ -272,8 +272,8 @@ static ssize_t xpcs_reg_value_read(struct file *filp, char __user *buffer,
	struct xgbe_prv_data *pdata = filp->private_data;
	unsigned int value;

	value = pdata->hw_if.read_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
					   pdata->debugfs_xpcs_reg);
	value = XMDIO_READ(pdata, pdata->debugfs_xpcs_mmd,
			   pdata->debugfs_xpcs_reg);

	return xgbe_common_read(buffer, count, ppos, value);
}
@@ -290,8 +290,8 @@ static ssize_t xpcs_reg_value_write(struct file *filp,
	if (len < 0)
		return len;

	pdata->hw_if.write_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
				    pdata->debugfs_xpcs_reg, value);
	XMDIO_WRITE(pdata, pdata->debugfs_xpcs_mmd, pdata->debugfs_xpcs_reg,
		    value);

	return len;
}

@@ -348,7 +348,7 @@ static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)

	/* Clear MAC flow control */
	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count);
	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = XGMAC_IOREAD(pdata, reg);
@@ -373,7 +373,7 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)

	/* Set MAC flow control */
	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count);
	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = XGMAC_IOREAD(pdata, reg);
@@ -509,8 +509,8 @@ static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
	XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);

	/* Enable all counter interrupts */
	XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xff);
	XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xff);
	XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
	XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);
}

static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
@@ -1633,6 +1633,9 @@ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
{
	unsigned int i, count;

	if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
		return 0;

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);

@@ -1703,8 +1706,8 @@ static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
}

static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size,
						  unsigned char queue_count)
static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size,
						  unsigned int queue_count)
{
	unsigned int q_fifo_size = 0;
	enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256;
@@ -1748,6 +1751,10 @@ static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size,
		q_fifo_size = XGBE_FIFO_SIZE_KB(256);
		break;
	}

	/* The configured value is not the actual amount of fifo RAM */
	q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size);

	q_fifo_size = q_fifo_size / queue_count;

	/* Set the queue fifo size programmable value */
@@ -1947,6 +1954,32 @@ static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
		xgbe_disable_rx_vlan_stripping(pdata);
}

static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
{
	bool read_hi;
	u64 val;

	switch (reg_lo) {
	/* These registers are always 64 bit */
	case MMC_TXOCTETCOUNT_GB_LO:
	case MMC_TXOCTETCOUNT_G_LO:
	case MMC_RXOCTETCOUNT_GB_LO:
	case MMC_RXOCTETCOUNT_G_LO:
		read_hi = true;
		break;

	default:
		read_hi = false;
	};

	val = XGMAC_IOREAD(pdata, reg_lo);

	if (read_hi)
		val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);

	return val;
}

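xgbe_mmc_read() above widens only the four octet counters that the
hardware implements as 64 bit: it reads the _LO register, then ORs in
the register 4 bytes above it, shifted into the top word. A generic
sketch of that lo/hi composition, assuming (as the driver does) that
the counter block keeps the pair coherent across the two reads:

/* Compose a 64-bit counter from an adjacent lo/hi 32-bit register
 * pair; read lo first, since hardware typically latches hi on the
 * lo read (an assumption about the device, not a universal rule).
 */
static u64 read_counter64(struct xgbe_prv_data *pdata, unsigned int reg_lo)
{
	u64 val = XGMAC_IOREAD(pdata, reg_lo);

	val |= (u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32;
	return val;
}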
static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
@@ -1954,75 +1987,75 @@ static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
		stats->txoctetcount_gb +=
			XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
			xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
		stats->txframecount_gb +=
			XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
			xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
		stats->txbroadcastframes_g +=
			XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
			xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
		stats->txmulticastframes_g +=
			XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
			xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
		stats->tx64octets_gb +=
			XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
			xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
		stats->tx65to127octets_gb +=
			XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
			xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
		stats->tx128to255octets_gb +=
			XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
			xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
		stats->tx256to511octets_gb +=
			XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
			xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
		stats->tx512to1023octets_gb +=
			XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
			xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
		stats->tx1024tomaxoctets_gb +=
			XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
			xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
		stats->txunicastframes_gb +=
			XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
			xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
		stats->txmulticastframes_gb +=
			XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
			xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
		stats->txbroadcastframes_g +=
			XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
			xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
		stats->txunderflowerror +=
			XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
			xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
		stats->txoctetcount_g +=
			XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
			xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
		stats->txframecount_g +=
			XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
			xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
		stats->txpauseframes +=
			XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
			xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
		stats->txvlanframes_g +=
			XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
			xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
}

static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
@@ -2032,95 +2065,95 @@ static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
		stats->rxframecount_gb +=
			XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
			xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
		stats->rxoctetcount_gb +=
			XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
			xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
		stats->rxoctetcount_g +=
			XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
			xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
		stats->rxbroadcastframes_g +=
			XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
			xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
		stats->rxmulticastframes_g +=
			XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
			xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
		stats->rxcrcerror +=
			XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
			xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
		stats->rxrunterror +=
			XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
			xgbe_mmc_read(pdata, MMC_RXRUNTERROR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
		stats->rxjabbererror +=
			XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
			xgbe_mmc_read(pdata, MMC_RXJABBERERROR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
		stats->rxundersize_g +=
			XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
			xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
		stats->rxoversize_g +=
			XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
			xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
		stats->rx64octets_gb +=
			XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
			xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
		stats->rx65to127octets_gb +=
			XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
			xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
		stats->rx128to255octets_gb +=
			XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
			xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
		stats->rx256to511octets_gb +=
			XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
			xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
		stats->rx512to1023octets_gb +=
			XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
			xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
		stats->rx1024tomaxoctets_gb +=
			XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
			xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
		stats->rxunicastframes_g +=
			XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
			xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
		stats->rxlengtherror +=
			XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
			xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
		stats->rxoutofrangetype +=
			XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
			xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
		stats->rxpauseframes +=
			XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
			xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
		stats->rxfifooverflow +=
			XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
			xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
		stats->rxvlanframes_gb +=
			XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
			xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
		stats->rxwatchdogerror +=
			XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
			xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
}

static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
@@ -2131,127 +2164,127 @@ static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);

	stats->txoctetcount_gb +=
		XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
		xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);

	stats->txframecount_gb +=
		XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
		xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);

	stats->txbroadcastframes_g +=
		XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
		xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);

	stats->txmulticastframes_g +=
		XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
		xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);

	stats->tx64octets_gb +=
		XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
		xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);

	stats->tx65to127octets_gb +=
		XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
		xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);

	stats->tx128to255octets_gb +=
		XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
		xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);

	stats->tx256to511octets_gb +=
		XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
		xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);

	stats->tx512to1023octets_gb +=
		XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
		xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);

	stats->tx1024tomaxoctets_gb +=
		XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
		xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);

	stats->txunicastframes_gb +=
		XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
		xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);

	stats->txmulticastframes_gb +=
		XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
		xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);

	stats->txbroadcastframes_g +=
		XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
		xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);

	stats->txunderflowerror +=
		XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
		xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);

	stats->txoctetcount_g +=
		XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
		xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);

	stats->txframecount_g +=
		XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
		xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);

	stats->txpauseframes +=
		XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
		xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);

	stats->txvlanframes_g +=
		XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
		xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);

	stats->rxframecount_gb +=
		XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
		xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);

	stats->rxoctetcount_gb +=
		XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
		xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);

	stats->rxoctetcount_g +=
		XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
		xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);

	stats->rxbroadcastframes_g +=
		XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
		xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);

	stats->rxmulticastframes_g +=
		XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
		xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);

	stats->rxcrcerror +=
		XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
		xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);

	stats->rxrunterror +=
		XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
		xgbe_mmc_read(pdata, MMC_RXRUNTERROR);

	stats->rxjabbererror +=
		XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
		xgbe_mmc_read(pdata, MMC_RXJABBERERROR);

	stats->rxundersize_g +=
		XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
		xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);

	stats->rxoversize_g +=
		XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
		xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);

	stats->rx64octets_gb +=
		XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
		xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);

	stats->rx65to127octets_gb +=
		XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
		xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);

	stats->rx128to255octets_gb +=
		XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
		xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);

	stats->rx256to511octets_gb +=
		XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
		xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);

	stats->rx512to1023octets_gb +=
		XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
		xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);

	stats->rx1024tomaxoctets_gb +=
		XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
		xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);

	stats->rxunicastframes_g +=
		XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
		xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);

	stats->rxlengtherror +=
		XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
		xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);

	stats->rxoutofrangetype +=
		XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
		xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);

	stats->rxpauseframes +=
		XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
		xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);

	stats->rxfifooverflow +=
		XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
		xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);

	stats->rxvlanframes_gb +=
		XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
		xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);

	stats->rxwatchdogerror +=
		XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
		xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);

	/* Un-freeze counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
@@ -361,6 +361,8 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);

@@ -361,15 +361,16 @@ static void xgbe_get_drvinfo(struct net_device *netdev,
			     struct ethtool_drvinfo *drvinfo)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_features *hw_feat = &pdata->hw_feat;

	strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, XGBE_DRV_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(pdata->dev),
		sizeof(drvinfo->bus_info));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d",
		 XGMAC_IOREAD_BITS(pdata, MAC_VR, USERVER),
		 XGMAC_IOREAD_BITS(pdata, MAC_VR, DEVID),
		 XGMAC_IOREAD_BITS(pdata, MAC_VR, SNPSVER));
		 XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
		 XGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID),
		 XGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER));
	drvinfo->n_stats = XGBE_STATS_COUNT;
}

@@ -172,7 +172,7 @@ static struct xgbe_channel *xgbe_alloc_rings(struct xgbe_prv_data *pdata)
	}

	if (i < pdata->rx_ring_count) {
		spin_lock_init(&tx_ring->lock);
		spin_lock_init(&rx_ring->lock);
		channel->rx_ring = rx_ring++;
	}

@@ -183,6 +183,7 @@
#define XGMAC_DRIVER_CONTEXT	1
#define XGMAC_IOCTL_CONTEXT	2

#define XGBE_FIFO_MAX		81920
#define XGBE_FIFO_SIZE_B(x)	(x)
#define XGBE_FIFO_SIZE_KB(x)	(x * 1024)

@@ -526,6 +527,9 @@ struct xgbe_desc_if {
 * or configurations are present in the device.
 */
struct xgbe_hw_features {
	/* HW Version */
	unsigned int version;

	/* HW Feature Register0 */
	unsigned int gmii;		/* 1000 Mbps support */
	unsigned int vlhash;		/* VLAN Hash Filter */
@@ -1,5 +1,6 @@
config NET_XGENE
	tristate "APM X-Gene SoC Ethernet Driver"
	depends on HAS_DMA
	select PHYLIB
	help
	  This is the Ethernet driver for the on-chip ethernet interface on the

@@ -84,7 +84,7 @@ config BNX2

config CNIC
	tristate "QLogic CNIC support"
	depends on PCI
	depends on PCI && (IPV6 || IPV6=n)
	select BNX2
	select UIO
	---help---

@@ -2233,7 +2233,12 @@ struct shmem2_region {
	u32 reserved3;				/* Offset 0x14C */
	u32 reserved4;				/* Offset 0x150 */
	u32 link_attr_sync[PORT_MAX];		/* Offset 0x154 */
	#define LINK_ATTR_SYNC_KR2_ENABLE	(1<<0)
	#define LINK_ATTR_SYNC_KR2_ENABLE	0x00000001
	#define LINK_SFP_EEPROM_COMP_CODE_MASK	0x0000ff00
	#define LINK_SFP_EEPROM_COMP_CODE_SHIFT	8
	#define LINK_SFP_EEPROM_COMP_CODE_SR	0x00001000
	#define LINK_SFP_EEPROM_COMP_CODE_LR	0x00002000
	#define LINK_SFP_EEPROM_COMP_CODE_LRM	0x00004000

	u32 reserved5[2];
	u32 reserved6[PORT_MAX];

@@ -154,15 +154,22 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
			LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)

#define SFP_EEPROM_CON_TYPE_ADDR		0x2
	#define SFP_EEPROM_CON_TYPE_VAL_UNKNOWN	0x0
	#define SFP_EEPROM_CON_TYPE_VAL_LC	0x7
	#define SFP_EEPROM_CON_TYPE_VAL_COPPER	0x21
	#define SFP_EEPROM_CON_TYPE_VAL_RJ45	0x22

#define SFP_EEPROM_COMP_CODE_ADDR		0x3
	#define SFP_EEPROM_COMP_CODE_SR_MASK	(1<<4)
	#define SFP_EEPROM_COMP_CODE_LR_MASK	(1<<5)
	#define SFP_EEPROM_COMP_CODE_LRM_MASK	(1<<6)
#define SFP_EEPROM_10G_COMP_CODE_ADDR		0x3
	#define SFP_EEPROM_10G_COMP_CODE_SR_MASK	(1<<4)
	#define SFP_EEPROM_10G_COMP_CODE_LR_MASK	(1<<5)
	#define SFP_EEPROM_10G_COMP_CODE_LRM_MASK	(1<<6)

#define SFP_EEPROM_1G_COMP_CODE_ADDR		0x6
	#define SFP_EEPROM_1G_COMP_CODE_SX	(1<<0)
	#define SFP_EEPROM_1G_COMP_CODE_LX	(1<<1)
	#define SFP_EEPROM_1G_COMP_CODE_CX	(1<<2)
	#define SFP_EEPROM_1G_COMP_CODE_BASE_T	(1<<3)

#define SFP_EEPROM_FC_TX_TECH_ADDR		0x8
	#define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
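The defines above index into an SFP module's A0h EEPROM page: byte 0x2
is the connector type, byte 0x3 carries the 10G Ethernet compliance
codes, and byte 0x6 the 1G codes. A hedged sketch of the classification
the driver builds from them (helper name and return convention are
illustrative, not from the patch):

/* Classify an SFP module from the first bytes of its A0h EEPROM. */
static int sfp_is_10g_optic(const u8 *eeprom)
{
	if (eeprom[SFP_EEPROM_CON_TYPE_ADDR] != SFP_EEPROM_CON_TYPE_VAL_LC &&
	    eeprom[SFP_EEPROM_CON_TYPE_ADDR] != SFP_EEPROM_CON_TYPE_VAL_RJ45)
		return 0;

	/* Any of SR/LR/LRM set means a 10G-capable optic/module. */
	return !!(eeprom[SFP_EEPROM_10G_COMP_CODE_ADDR] &
		  (SFP_EEPROM_10G_COMP_CODE_SR_MASK |
		   SFP_EEPROM_10G_COMP_CODE_LR_MASK |
		   SFP_EEPROM_10G_COMP_CODE_LRM_MASK));
}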
@@ -3633,8 +3640,8 @@ static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
				 reg_set[i].val);

	/* Start KR2 work-around timer which handles BCM8073 link-partner */
	vars->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE;
	bnx2x_update_link_attr(params, vars->link_attr_sync);
	params->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE;
	bnx2x_update_link_attr(params, params->link_attr_sync);
}

static void bnx2x_disable_kr2(struct link_params *params,
@@ -3666,8 +3673,8 @@ static void bnx2x_disable_kr2(struct link_params *params,
	for (i = 0; i < ARRAY_SIZE(reg_set); i++)
		bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
				 reg_set[i].val);
	vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
	bnx2x_update_link_attr(params, vars->link_attr_sync);
	params->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
	bnx2x_update_link_attr(params, params->link_attr_sync);

	vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
}
@@ -4810,7 +4817,7 @@ void bnx2x_link_status_update(struct link_params *params,
				~FEATURE_CONFIG_PFC_ENABLED;

	if (SHMEM2_HAS(bp, link_attr_sync))
		vars->link_attr_sync = SHMEM2_RD(bp,
		params->link_attr_sync = SHMEM2_RD(bp,
						 link_attr_sync[params->port]);

	DP(NETIF_MSG_LINK, "link_status 0x%x  phy_link_up %x int_mask 0x%x\n",
@@ -8057,21 +8064,24 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
{
	struct bnx2x *bp = params->bp;
	u32 sync_offset = 0, phy_idx, media_types;
	u8 gport, val[2], check_limiting_mode = 0;
	u8 val[SFP_EEPROM_FC_TX_TECH_ADDR + 1], check_limiting_mode = 0;
	*edc_mode = EDC_MODE_LIMITING;
	phy->media_type = ETH_PHY_UNSPECIFIED;
	/* First check for copper cable */
	if (bnx2x_read_sfp_module_eeprom(phy,
					 params,
					 I2C_DEV_ADDR_A0,
					 SFP_EEPROM_CON_TYPE_ADDR,
					 2,
					 0,
					 SFP_EEPROM_FC_TX_TECH_ADDR + 1,
					 (u8 *)val) != 0) {
		DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n");
		return -EINVAL;
	}

	switch (val[0]) {
	params->link_attr_sync &= ~LINK_SFP_EEPROM_COMP_CODE_MASK;
	params->link_attr_sync |= val[SFP_EEPROM_10G_COMP_CODE_ADDR] <<
		LINK_SFP_EEPROM_COMP_CODE_SHIFT;
	bnx2x_update_link_attr(params, params->link_attr_sync);
	switch (val[SFP_EEPROM_CON_TYPE_ADDR]) {
	case SFP_EEPROM_CON_TYPE_VAL_COPPER:
	{
		u8 copper_module_type;
@@ -8079,17 +8089,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
		/* Check if it's an active cable (includes SFP+ module)
		 * or passive cable
		 */
		if (bnx2x_read_sfp_module_eeprom(phy,
						 params,
						 I2C_DEV_ADDR_A0,
						 SFP_EEPROM_FC_TX_TECH_ADDR,
						 1,
						 &copper_module_type) != 0) {
			DP(NETIF_MSG_LINK,
			   "Failed to read copper-cable-type"
			   " from SFP+ EEPROM\n");
			return -EINVAL;
		}
		copper_module_type = val[SFP_EEPROM_FC_TX_TECH_ADDR];

		if (copper_module_type &
		    SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) {
@@ -8115,16 +8115,18 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
		}
		break;
	}
	case SFP_EEPROM_CON_TYPE_VAL_UNKNOWN:
	case SFP_EEPROM_CON_TYPE_VAL_LC:
	case SFP_EEPROM_CON_TYPE_VAL_RJ45:
		check_limiting_mode = 1;
		if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK |
			       SFP_EEPROM_COMP_CODE_LR_MASK |
			       SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) {
		if ((val[SFP_EEPROM_10G_COMP_CODE_ADDR] &
		     (SFP_EEPROM_10G_COMP_CODE_SR_MASK |
		      SFP_EEPROM_10G_COMP_CODE_LR_MASK |
		      SFP_EEPROM_10G_COMP_CODE_LRM_MASK)) == 0) {
			DP(NETIF_MSG_LINK, "1G SFP module detected\n");
			gport = params->port;
			phy->media_type = ETH_PHY_SFP_1G_FIBER;
			if (phy->req_line_speed != SPEED_1000) {
				u8 gport = params->port;
				phy->req_line_speed = SPEED_1000;
				if (!CHIP_IS_E1x(bp)) {
					gport = BP_PATH(bp) +
@@ -8134,6 +8136,12 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
				   "Warning: Link speed was forced to 1000Mbps. Current SFP module in port %d is not compliant with 10G Ethernet\n",
				   gport);
			}
			if (val[SFP_EEPROM_1G_COMP_CODE_ADDR] &
			    SFP_EEPROM_1G_COMP_CODE_BASE_T) {
				bnx2x_sfp_set_transmitter(params, phy, 0);
				msleep(40);
				bnx2x_sfp_set_transmitter(params, phy, 1);
			}
		} else {
			int idx, cfg_idx = 0;
			DP(NETIF_MSG_LINK, "10G Optic module detected\n");
@@ -8149,7 +8157,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
		break;
	default:
		DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n",
		   val[0]);
		   val[SFP_EEPROM_CON_TYPE_ADDR]);
		return -EINVAL;
	}
	sync_offset = params->shmem_base +
@@ -13507,7 +13515,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,

	sigdet = bnx2x_warpcore_get_sigdet(phy, params);
	if (!sigdet) {
		if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
		if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
			bnx2x_kr2_recovery(params, vars, phy);
			DP(NETIF_MSG_LINK, "No sigdet\n");
		}
@@ -13525,7 +13533,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,

	/* CL73 has not begun yet */
	if (base_page == 0) {
		if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
		if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
			bnx2x_kr2_recovery(params, vars, phy);
			DP(NETIF_MSG_LINK, "No BP\n");
		}
@@ -13541,7 +13549,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
			((next_page & 0xe0) == 0x20))));

	/* In case KR2 is already disabled, check if we need to re-enable it */
	if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
	if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
		if (!not_kr2_device) {
			DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page,
			   next_page);

@@ -323,6 +323,9 @@ struct link_params {
#define LINK_FLAGS_INT_DISABLED		(1<<0)
#define PHY_INITIALIZED		(1<<1)
	u32 lfa_base;

	/* The same definitions as the shmem2 parameter */
	u32 link_attr_sync;
};

/* Output parameters */
@@ -364,8 +367,6 @@ struct link_vars {
	u8 rx_tx_asic_rst;
	u8 turn_to_run_wc_rt;
	u16 rsrv2;
	/* The same definitions as the shmem2 parameter */
	u32 link_attr_sync;
};

/***********************************************************/

@@ -6849,6 +6849,37 @@ static void bnx2x__common_init_phy(struct bnx2x *bp)
	bnx2x_release_phy_lock(bp);
}

static void bnx2x_config_endianity(struct bnx2x *bp, u32 val)
{
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val);

	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val);
}

static void bnx2x_set_endianity(struct bnx2x *bp)
{
#ifdef __BIG_ENDIAN
	bnx2x_config_endianity(bp, 1);
#else
	bnx2x_config_endianity(bp, 0);
#endif
}

static void bnx2x_reset_endianity(struct bnx2x *bp)
{
	bnx2x_config_endianity(bp, 0);
}

/**
 * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
 *
@@ -6915,23 +6946,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)

	bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

	/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	bnx2x_set_endianity(bp);
	bnx2x_ilt_init_page_size(bp, INITOP_SET);

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
@@ -13169,9 +13184,15 @@ static void __bnx2x_remove(struct pci_dev *pdev,
	bnx2x_iov_remove_one(bp);

	/* Power on: we can't let PCI layer write to us while we are in D3 */
	if (IS_PF(bp))
	if (IS_PF(bp)) {
		bnx2x_set_power_state(bp, PCI_D0);

		/* Set endianity registers to reset values in case the next
		 * driver boots in a different endianness environment.
		 */
		bnx2x_reset_endianity(bp);
	}

	/* Disable MSI/MSI-X */
	bnx2x_disable_msi(bp);
@@ -31,7 +31,7 @@
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
@@ -3685,7 +3685,7 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
			     struct dst_entry **dst)
{
#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
#if IS_ENABLED(CONFIG_IPV6)
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
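The cnic hunks above convert open-coded CONFIG tests to IS_ENABLED(),
which is true when an option is built in (=y) or modular (=m). The old
expression only accepted IPV6=m when cnic itself was a module; the new
Kconfig rule shown earlier, "depends on PCI && (IPV6 || IPV6=n)", rules
out the one unsafe combination (CNIC=y with IPV6=m), so the simpler
test suffices. A sketch of the idiom (function name is illustrative):

/* IS_ENABLED(CONFIG_FOO) == 1 for FOO=y or FOO=m, 0 otherwise. */
static int route_v6_sketch(struct sockaddr_in6 *dst_addr,
			   struct dst_entry **dst)
{
#if IS_ENABLED(CONFIG_IPV6)
	/* a real IPv6 route lookup would go here */
	return 0;
#else
	return -ENETUNREACH;
#endif
}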
@@ -11617,6 +11617,12 @@ static int tg3_open(struct net_device *dev)
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->pcierr_recovery) {
		netdev_err(dev, "Failed to open device. PCI error recovery "
			   "in progress\n");
		return -EAGAIN;
	}

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
@@ -11674,6 +11680,12 @@ static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->pcierr_recovery) {
		netdev_err(dev, "Failed to close device. PCI error recovery "
			   "in progress\n");
		return -EAGAIN;
	}

	tg3_ptp_fini(tp);

	tg3_stop(tp);
@@ -17561,6 +17573,7 @@ static int tg3_init_one(struct pci_dev *pdev,
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->irq_sync = 1;
	tp->pcierr_recovery = false;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
@@ -18071,6 +18084,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,

	rtnl_lock();

	tp->pcierr_recovery = true;

	/* We probably don't have netdev yet */
	if (!netdev || !netif_running(netdev))
		goto done;
@@ -18195,6 +18210,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
	tg3_phy_start(tp);

done:
	tp->pcierr_recovery = false;
	rtnl_unlock();
}

@@ -3407,6 +3407,7 @@ struct tg3 {

	struct device			*hwmon_dev;
	bool				link_up;
	bool				pcierr_recovery;
};

/* Accessor macros for chip and asic attributes
@@ -2506,7 +2506,7 @@ bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
	 * For TSO, the TCP checksum field is seeded with pseudo-header sum
	 * excluding the length field.
	 */
	if (skb->protocol == htons(ETH_P_IP)) {
	if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		/* Do we really need these? */
@@ -2870,12 +2870,13 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		__be16 net_proto = vlan_get_protocol(skb);
		u8 proto = 0;

		if (skb->protocol == htons(ETH_P_IP))
		if (net_proto == htons(ETH_P_IP))
			proto = ip_hdr(skb)->protocol;
#ifdef NETIF_F_IPV6_CSUM
		else if (skb->protocol == htons(ETH_P_IPV6)) {
		else if (net_proto == htons(ETH_P_IPV6)) {
			/* nexthdr may not be TCP immediately. */
			proto = ipv6_hdr(skb)->nexthdr;
		}
@ -1,6 +1,7 @@
|
||||
config NET_CALXEDA_XGMAC
|
||||
tristate "Calxeda 1G/10G XGMAC Ethernet driver"
|
||||
depends on HAS_IOMEM && HAS_DMA
|
||||
depends on ARCH_HIGHBANK || COMPILE_TEST
|
||||
select CRC32
|
||||
help
|
||||
This is the driver for the XGMAC Ethernet IP block found on Calxeda
|
||||
|
@ -1253,7 +1253,9 @@ freeout: t4_free_sge_resources(adap);
|
||||
goto freeout;
|
||||
}
|
||||
|
||||
t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
|
||||
t4_write_reg(adap, is_t4(adap->params.chip) ?
|
||||
MPS_TRC_RSS_CONTROL :
|
||||
MPS_T5_TRC_RSS_CONTROL,
|
||||
RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
|
||||
QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
|
||||
return 0;
|
||||
@ -1761,7 +1763,8 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
|
||||
0xd004, 0xd03c,
|
||||
0xdfc0, 0xdfe0,
|
||||
0xe000, 0xea7c,
|
||||
0xf000, 0x11190,
|
||||
0xf000, 0x11110,
|
||||
0x11118, 0x11190,
|
||||
0x19040, 0x1906c,
|
||||
0x19078, 0x19080,
|
||||
0x1908c, 0x19124,
|
||||
@ -1968,7 +1971,8 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
|
||||
0xd004, 0xd03c,
|
||||
0xdfc0, 0xdfe0,
|
||||
0xe000, 0x11088,
|
||||
0x1109c, 0x1117c,
|
||||
0x1109c, 0x11110,
|
||||
0x11118, 0x1117c,
|
||||
0x11190, 0x11204,
|
||||
0x19040, 0x1906c,
|
||||
0x19078, 0x19080,
|
||||
@ -5955,7 +5959,8 @@ static int adap_init0(struct adapter *adap)
|
||||
params[3] = FW_PARAM_PFVF(CQ_END);
|
||||
params[4] = FW_PARAM_PFVF(OCQ_START);
|
||||
params[5] = FW_PARAM_PFVF(OCQ_END);
|
||||
ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
|
||||
ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
|
||||
val);
|
||||
if (ret < 0)
|
||||
goto bye;
|
||||
adap->vres.qp.start = val[0];
|
||||
@ -5967,7 +5972,8 @@ static int adap_init0(struct adapter *adap)
|
||||
|
||||
params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
|
||||
params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
|
||||
ret = t4_query_params(adap, 0, 0, 0, 2, params, val);
|
||||
ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
|
||||
val);
|
||||
if (ret < 0) {
|
||||
adap->params.max_ordird_qp = 8;
|
||||
adap->params.max_ird_adapter = 32 * adap->tids.ntids;
|
||||
|
@ -167,6 +167,34 @@ void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
|
||||
t4_write_reg(adap, PCIE_CFG_SPACE_REQ, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* t4_report_fw_error - report firmware error
|
||||
* @adap: the adapter
|
||||
*
|
||||
* The adapter firmware can indicate error conditions to the host.
|
||||
* If the firmware has indicated an error, print out the reason for
|
||||
* the firmware error.
|
||||
*/
|
||||
static void t4_report_fw_error(struct adapter *adap)
|
||||
{
|
||||
static const char *const reason[] = {
|
||||
"Crash", /* PCIE_FW_EVAL_CRASH */
|
||||
"During Device Preparation", /* PCIE_FW_EVAL_PREP */
|
||||
"During Device Configuration", /* PCIE_FW_EVAL_CONF */
|
||||
"During Device Initialization", /* PCIE_FW_EVAL_INIT */
"Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
"Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
"Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
"Reserved", /* reserved */
};

u32 pcie_fw;

pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
if (pcie_fw & FW_PCIE_FW_ERR)
dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
reason[FW_PCIE_FW_EVAL_GET(pcie_fw)]);
}

/*
* Get the reply to a mailbox command and store it in @rpl in big-endian order.
*/
@ -300,6 +328,7 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
dump_mbox(adap, mbox, data_reg);
dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
*(const u8 *)cmd, mbox);
t4_report_fw_error(adap);
return -ETIMEDOUT;
}

@ -566,6 +595,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
#define VPD_BASE 0x400
#define VPD_BASE_OLD 0
#define VPD_LEN 1024
#define CHELSIO_VPD_UNIQUE_ID 0x82

/**
* t4_seeprom_wp - enable/disable EEPROM write protection
@ -603,7 +633,14 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
if (ret < 0)
goto out;
addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;

/* The VPD shall have a unique identifier specified by the PCI SIG.
* For chelsio adapters, the identifier is 0x82. The first byte of a VPD
* shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
* is expected to automatically put this entry at the
* beginning of the VPD.
*/
addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;

ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
if (ret < 0)
@ -667,6 +704,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
strim(p->sn);
i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
memcpy(p->pn, vpd + pn, min(i, PN_LEN));
strim(p->pn);

@ -1394,15 +1432,18 @@ static void pcie_intr_handler(struct adapter *adapter)

int fat;

fat = t4_handle_intr_status(adapter,
PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
sysbus_intr_info) +
t4_handle_intr_status(adapter,
PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
pcie_port_intr_info) +
t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
is_t4(adapter->params.chip) ?
pcie_intr_info : t5_pcie_intr_info);
if (is_t4(adapter->params.chip))
fat = t4_handle_intr_status(adapter,
PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
sysbus_intr_info) +
t4_handle_intr_status(adapter,
PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
pcie_port_intr_info) +
t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
pcie_intr_info);
else
fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
t5_pcie_intr_info);

if (fat)
t4_fatal_err(adapter);
@ -1521,6 +1562,9 @@ static void cim_intr_handler(struct adapter *adapter)

int fat;

if (t4_read_reg(adapter, MA_PCIE_FW) & FW_PCIE_FW_ERR)
t4_report_fw_error(adapter);

fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
cim_intr_info) +
t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
@ -1768,10 +1812,16 @@ static void ma_intr_handler(struct adapter *adap)
{
u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);

if (status & MEM_PERR_INT_CAUSE)
if (status & MEM_PERR_INT_CAUSE) {
dev_alert(adap->pdev_dev,
"MA parity error, parity status %#x\n",
t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
if (is_t5(adap->params.chip))
dev_alert(adap->pdev_dev,
"MA parity error, parity status %#x\n",
t4_read_reg(adap,
MA_PARITY_ERROR_STATUS2));
}
if (status & MEM_WRAP_INT_CAUSE) {
v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
dev_alert(adap->pdev_dev, "MA address wrap-around error by "
@ -2733,12 +2783,16 @@ retry:
/*
* Issue the HELLO command to the firmware. If it's not successful
* but indicates that we got a "busy" or "timeout" condition, retry
* the HELLO until we exhaust our retry limit.
* the HELLO until we exhaust our retry limit. If we do exceed our
* retry limit, check to see if the firmware left us any error
* information and report that if so.
*/
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
if (ret < 0) {
if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
goto retry;
if (t4_read_reg(adap, MA_PCIE_FW) & FW_PCIE_FW_ERR)
t4_report_fw_error(adap);
return ret;
}

@ -3742,6 +3796,7 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
lc->link_ok = link_ok;
lc->speed = speed;
lc->fc = fc;
lc->supported = be16_to_cpu(p->u.info.pcap);
t4_os_link_changed(adap, port, link_ok);
}
if (mod != pi->mod_type) {

@ -511,6 +511,7 @@
#define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT)
#define MA_PCIE_FW 0x30b8
#define MA_PARITY_ERROR_STATUS 0x77f4
#define MA_PARITY_ERROR_STATUS2 0x7804

#define MA_EXT_MEMORY1_BAR 0x7808
#define EDC_0_BASE_ADDR 0x7900
@ -959,6 +960,7 @@
#define TRCMULTIFILTER 0x00000001U

#define MPS_TRC_RSS_CONTROL 0x9808
#define MPS_T5_TRC_RSS_CONTROL 0xa00c
#define RSSCONTROL_MASK 0x00ff0000U
#define RSSCONTROL_SHIFT 16
#define RSSCONTROL(x) ((x) << RSSCONTROL_SHIFT)

@ -2228,6 +2228,10 @@ struct fw_debug_cmd {
#define FW_PCIE_FW_MASTER(x) ((x) << FW_PCIE_FW_MASTER_SHIFT)
#define FW_PCIE_FW_MASTER_GET(x) (((x) >> FW_PCIE_FW_MASTER_SHIFT) & \
FW_PCIE_FW_MASTER_MASK)
#define FW_PCIE_FW_EVAL_MASK 0x7
#define FW_PCIE_FW_EVAL_SHIFT 24
#define FW_PCIE_FW_EVAL_GET(x) (((x) >> FW_PCIE_FW_EVAL_SHIFT) & \
FW_PCIE_FW_EVAL_MASK)

struct fw_hdr {
u8 ver;

@ -1994,7 +1994,7 @@ static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
{
swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;

if (skb->protocol != htons(ETH_P_IP))
if (vlan_get_protocol(skb) != htons(ETH_P_IP))
return;

if (skb->ip_summed == CHECKSUM_PARTIAL)

@ -2674,7 +2674,8 @@ set_itr_now:
#define E1000_TX_FLAGS_VLAN_SHIFT 16

static int e1000_tso(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
__be16 protocol)
{
struct e1000_context_desc *context_desc;
struct e1000_buffer *buffer_info;
@ -2692,7 +2693,7 @@ static int e1000_tso(struct e1000_adapter *adapter,

hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
if (skb->protocol == htons(ETH_P_IP)) {
if (protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
@ -2702,7 +2703,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
0);
cmd_length = E1000_TXD_CMD_IP;
ipcse = skb_transport_offset(skb) - 1;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
} else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
@ -2745,7 +2746,8 @@ static int e1000_tso(struct e1000_adapter *adapter,
}

static bool e1000_tx_csum(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
__be16 protocol)
{
struct e1000_context_desc *context_desc;
struct e1000_buffer *buffer_info;
@ -2756,7 +2758,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter,
if (skb->ip_summed != CHECKSUM_PARTIAL)
return false;

switch (skb->protocol) {
switch (protocol) {
case cpu_to_be16(ETH_P_IP):
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
cmd_len |= E1000_TXD_CMD_TCP;
@ -3097,6 +3099,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
int count = 0;
int tso;
unsigned int f;
__be16 protocol = vlan_get_protocol(skb);

/* This goes back to the question of how to logically map a Tx queue
* to a flow. Right now, performance is impacted slightly negatively
@ -3210,7 +3213,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,

first = tx_ring->next_to_use;

tso = e1000_tso(adapter, tx_ring, skb);
tso = e1000_tso(adapter, tx_ring, skb, protocol);
if (tso < 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@ -3220,10 +3223,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
if (likely(hw->mac_type != e1000_82544))
tx_ring->last_tx_tso = true;
tx_flags |= E1000_TX_FLAGS_TSO;
} else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
} else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
tx_flags |= E1000_TX_FLAGS_CSUM;

if (likely(skb->protocol == htons(ETH_P_IP)))
if (protocol == htons(ETH_P_IP))
tx_flags |= E1000_TX_FLAGS_IPV4;

if (unlikely(skb->no_fcs))

@ -5164,7 +5164,8 @@ link_up:
#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT 16

static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb,
__be16 protocol)
{
struct e1000_context_desc *context_desc;
struct e1000_buffer *buffer_info;
@ -5183,7 +5184,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)

hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
if (skb->protocol == htons(ETH_P_IP)) {
if (protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
@ -5231,7 +5232,8 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
return 1;
}

static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb,
__be16 protocol)
{
struct e1000_adapter *adapter = tx_ring->adapter;
struct e1000_context_desc *context_desc;
@ -5239,16 +5241,10 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
unsigned int i;
u8 css;
u32 cmd_len = E1000_TXD_CMD_DEXT;
__be16 protocol;

if (skb->ip_summed != CHECKSUM_PARTIAL)
return false;

if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
else
protocol = skb->protocol;

switch (protocol) {
case cpu_to_be16(ETH_P_IP):
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
@ -5546,6 +5542,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
int count = 0;
int tso;
unsigned int f;
__be16 protocol = vlan_get_protocol(skb);

if (test_bit(__E1000_DOWN, &adapter->state)) {
dev_kfree_skb_any(skb);
@ -5620,7 +5617,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,

first = tx_ring->next_to_use;

tso = e1000_tso(tx_ring, skb);
tso = e1000_tso(tx_ring, skb, protocol);
if (tso < 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@ -5628,14 +5625,14 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,

if (tso)
tx_flags |= E1000_TX_FLAGS_TSO;
else if (e1000_tx_csum(tx_ring, skb))
else if (e1000_tx_csum(tx_ring, skb, protocol))
tx_flags |= E1000_TX_FLAGS_CSUM;

/* Old method was to assume IPv4 packet by default if TSO was enabled.
* 82571 hardware supports TSO capabilities for IPv6 as well...
* no longer assume, we must.
*/
if (skb->protocol == htons(ETH_P_IP))
if (protocol == htons(ETH_P_IP))
tx_flags |= E1000_TX_FLAGS_IPV4;

if (unlikely(skb->no_fcs))

@ -2295,7 +2295,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
goto out_drop;

/* obtain protocol of skb */
protocol = skb->protocol;
protocol = vlan_get_protocol(skb);

/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_bi[tx_ring->next_to_use];

@ -1597,7 +1597,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
goto out_drop;

/* obtain protocol of skb */
protocol = skb->protocol;
protocol = vlan_get_protocol(skb);

/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_bi[tx_ring->next_to_use];

@ -20,6 +20,7 @@
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/io.h>
@ -1371,15 +1372,16 @@ static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
{
if (skb->ip_summed == CHECKSUM_PARTIAL) {
int ip_hdr_len = 0;
__be16 l3_proto = vlan_get_protocol(skb);
u8 l4_proto;

if (skb->protocol == htons(ETH_P_IP)) {
if (l3_proto == htons(ETH_P_IP)) {
struct iphdr *ip4h = ip_hdr(skb);

/* Calculate IPv4 checksum and L4 checksum */
ip_hdr_len = ip4h->ihl;
l4_proto = ip4h->protocol;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
} else if (l3_proto == htons(ETH_P_IPV6)) {
struct ipv6hdr *ip6h = ipv6_hdr(skb);

/* Read l4_protocol from one of IPv6 extra headers */
@ -1390,7 +1392,7 @@ static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
return MVNETA_TX_L4_CSUM_NOT;

return mvneta_txq_desc_csum(skb_network_offset(skb),
skb->protocol, ip_hdr_len, l4_proto);
l3_proto, ip_hdr_len, l4_proto);
}

return MVNETA_TX_L4_CSUM_NOT;

@ -474,39 +474,12 @@ static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *ad
int qpn, u64 *reg_id)
{
int err;
struct mlx4_spec_list spec_eth_outer = { {NULL} };
struct mlx4_spec_list spec_vxlan = { {NULL} };
struct mlx4_spec_list spec_eth_inner = { {NULL} };

struct mlx4_net_trans_rule rule = {
.queue_mode = MLX4_NET_TRANS_Q_FIFO,
.exclusive = 0,
.allow_loopback = 1,
.promisc_mode = MLX4_FS_REGULAR,
.priority = MLX4_DOMAIN_NIC,
};

__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
return 0; /* do nothing */

rule.port = priv->port;
rule.qpn = qpn;
INIT_LIST_HEAD(&rule.list);

spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN; /* any vxlan header */
spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH; /* any inner eth header */

list_add_tail(&spec_eth_outer.list, &rule.list);
list_add_tail(&spec_vxlan.list, &rule.list);
list_add_tail(&spec_eth_inner.list, &rule.list);

err = mlx4_flow_attach(priv->mdev->dev, &rule, reg_id);
err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
MLX4_DOMAIN_NIC, reg_id);
if (err) {
en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
return err;

@ -1020,6 +1020,44 @@ int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
}
EXPORT_SYMBOL_GPL(mlx4_flow_detach);

int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
int port, int qpn, u16 prio, u64 *reg_id)
{
int err;
struct mlx4_spec_list spec_eth_outer = { {NULL} };
struct mlx4_spec_list spec_vxlan = { {NULL} };
struct mlx4_spec_list spec_eth_inner = { {NULL} };

struct mlx4_net_trans_rule rule = {
.queue_mode = MLX4_NET_TRANS_Q_FIFO,
.exclusive = 0,
.allow_loopback = 1,
.promisc_mode = MLX4_FS_REGULAR,
};

__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

rule.port = port;
rule.qpn = qpn;
rule.priority = prio;
INIT_LIST_HEAD(&rule.list);

spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN; /* any vxlan header */
spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH; /* any inner eth header */

list_add_tail(&spec_eth_outer.list, &rule.list);
list_add_tail(&spec_vxlan.list, &rule.list);
list_add_tail(&spec_eth_inner.list, &rule.list);

err = mlx4_flow_attach(dev, &rule, reg_id);
return err;
}
EXPORT_SYMBOL(mlx4_tunnel_steer_add);

int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn,
u32 max_range_qpn)
{

@ -206,7 +206,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
int rx_head = priv->rx_head;
int rx = 0;

while (1) {
while (rx < budget) {
desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head);
desc0 = readl(desc + RX_REG_OFFSET_DESC0);

@ -218,7 +218,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
net_dbg_ratelimited("packet error\n");
priv->stats.rx_dropped++;
priv->stats.rx_errors++;
continue;
goto rx_next;
}

len = desc0 & RX_DESC0_FRAME_LEN_MASK;
@ -226,13 +226,19 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
if (len > RX_BUF_SIZE)
len = RX_BUF_SIZE;

skb = build_skb(priv->rx_buf[rx_head], priv->rx_buf_size);
dma_sync_single_for_cpu(&ndev->dev,
priv->rx_mapping[rx_head],
priv->rx_buf_size, DMA_FROM_DEVICE);
skb = netdev_alloc_skb_ip_align(ndev, len);

if (unlikely(!skb)) {
net_dbg_ratelimited("build_skb failed\n");
net_dbg_ratelimited("netdev_alloc_skb_ip_align failed\n");
priv->stats.rx_dropped++;
priv->stats.rx_errors++;
goto rx_next;
}

memcpy(skb->data, priv->rx_buf[rx_head], len);
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, ndev);
napi_gro_receive(&priv->napi, skb);
@ -244,18 +250,15 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
if (desc0 & RX_DESC0_MULTICAST)
priv->stats.multicast++;

rx_next:
writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);

rx_head = RX_NEXT(rx_head);
priv->rx_head = rx_head;

if (rx >= budget)
break;
}

if (rx < budget) {
napi_gro_flush(napi, false);
__napi_complete(napi);
napi_complete(napi);
}

priv->reg_imr |= RPKT_FINISH_M;
@ -346,10 +349,12 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
len = ETH_ZLEN;
}

txdes1 = readl(desc + TX_REG_OFFSET_DESC1);
txdes1 |= TX_DESC1_LTS | TX_DESC1_FTS;
txdes1 &= ~(TX_DESC1_FIFO_COMPLETE | TX_DESC1_INTR_COMPLETE);
txdes1 |= (len & TX_DESC1_BUF_SIZE_MASK);
dma_sync_single_for_device(&ndev->dev, priv->tx_mapping[tx_head],
priv->tx_buf_size, DMA_TO_DEVICE);

txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
if (tx_head == TX_DESC_NUM_MASK)
txdes1 |= TX_DESC1_END;
writel(txdes1, desc + TX_REG_OFFSET_DESC1);
writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0);

@ -465,8 +470,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
spin_lock_init(&priv->txlock);

priv->tx_buf_size = TX_BUF_SIZE;
priv->rx_buf_size = RX_BUF_SIZE +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
priv->rx_buf_size = RX_BUF_SIZE;

priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE *
TX_DESC_NUM, &priv->tx_base,

@ -1220,6 +1220,9 @@ static int lpc_eth_open(struct net_device *ndev)

__lpc_eth_clock_enable(pldat, true);

/* Suspended PHY makes LPC ethernet core block, so resume now */
phy_resume(pldat->phy_dev);

/* Reset and initialize */
__lpc_eth_reset(pldat);
__lpc_eth_init(pldat);

@ -2556,6 +2556,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)

if (skb_is_gso(skb)) {
int err;
__be16 l3_proto = vlan_get_protocol(skb);

err = skb_cow_head(skb, 0);
if (err < 0)
@ -2572,7 +2573,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
<< OB_MAC_TRANSPORT_HDR_SHIFT);
mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
if (likely(skb->protocol == htons(ETH_P_IP))) {
if (likely(l3_proto == htons(ETH_P_IP))) {
struct iphdr *iph = ip_hdr(skb);
iph->check = 0;
mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
@ -2580,7 +2581,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
iph->daddr, 0,
IPPROTO_TCP,
0);
} else if (skb->protocol == htons(ETH_P_IPV6)) {
} else if (l3_proto == htons(ETH_P_IPV6)) {
mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,

@ -5,6 +5,7 @@
config SH_ETH
tristate "Renesas SuperH Ethernet support"
depends on HAS_DMA
depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST
select CRC32
select MII
select MDIO_BITBANG

@ -28,7 +28,7 @@

#include "stmmac.h"

static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
{
struct stmmac_priv *priv = (struct stmmac_priv *)p;
unsigned int txsize = priv->dma_tx_size;
@ -47,7 +47,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)

desc->des2 = dma_map_single(priv->device, skb->data,
bmax, DMA_TO_DEVICE);
priv->tx_skbuff_dma[entry] = desc->des2;
if (dma_mapping_error(priv->device, desc->des2))
return -1;
priv->tx_skbuff_dma[entry].buf = desc->des2;
priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE);

while (len != 0) {
@ -59,7 +61,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des2 = dma_map_single(priv->device,
(skb->data + bmax * i),
bmax, DMA_TO_DEVICE);
priv->tx_skbuff_dma[entry] = desc->des2;
if (dma_mapping_error(priv->device, desc->des2))
return -1;
priv->tx_skbuff_dma[entry].buf = desc->des2;
priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
STMMAC_CHAIN_MODE);
priv->hw->desc->set_tx_owner(desc);
@ -69,7 +73,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des2 = dma_map_single(priv->device,
(skb->data + bmax * i), len,
DMA_TO_DEVICE);
priv->tx_skbuff_dma[entry] = desc->des2;
if (dma_mapping_error(priv->device, desc->des2))
return -1;
priv->tx_skbuff_dma[entry].buf = desc->des2;
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
STMMAC_CHAIN_MODE);
priv->hw->desc->set_tx_owner(desc);

@ -220,10 +220,10 @@ enum dma_irq_status {
handle_tx = 0x8,
};

#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 1)
#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 2)
#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 3)
#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 4)
#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 0)
#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 1)
#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2)
#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 3)

#define CORE_PCS_ANE_COMPLETE (1 << 5)
#define CORE_PCS_LINK_STATUS (1 << 6)
@ -287,7 +287,7 @@ struct dma_features {

/* Default LPI timers */
#define STMMAC_DEFAULT_LIT_LS 0x3E8
#define STMMAC_DEFAULT_TWT_LS 0x0
#define STMMAC_DEFAULT_TWT_LS 0x1E

#define STMMAC_CHAIN_MODE 0x1
#define STMMAC_RING_MODE 0x2
@ -425,7 +425,7 @@ struct stmmac_mode_ops {
void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
unsigned int extend_desc);
unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
int (*jumbo_frm)(void *priv, struct sk_buff *skb, int csum);
int (*set_16kib_bfsize)(int mtu);
void (*init_desc3)(struct dma_desc *p);
void (*refill_desc3) (void *priv, struct dma_desc *p);
@ -445,6 +445,7 @@ struct mac_device_info {
int multicast_filter_bins;
int unicast_filter_entries;
int mcast_bits_log2;
unsigned int rx_csum;
};

struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,

@ -153,7 +153,7 @@ enum inter_frame_gap {
#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */

#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \
GMAC_CONTROL_BE)
GMAC_CONTROL_BE | GMAC_CONTROL_DCRS)

/* GMAC Frame Filter defines */
#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */

@ -58,7 +58,11 @@ static int dwmac1000_rx_ipc_enable(struct mac_device_info *hw)
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_CONTROL);

value |= GMAC_CONTROL_IPC;
if (hw->rx_csum)
value |= GMAC_CONTROL_IPC;
else
value &= ~GMAC_CONTROL_IPC;

writel(value, ioaddr + GMAC_CONTROL);

value = readl(ioaddr + GMAC_CONTROL);

@ -68,7 +68,7 @@ struct stmmac_counters {
unsigned int mmc_rx_octetcount_g;
unsigned int mmc_rx_broadcastframe_g;
unsigned int mmc_rx_multicastframe_g;
unsigned int mmc_rx_crc_errror;
unsigned int mmc_rx_crc_error;
unsigned int mmc_rx_align_error;
unsigned int mmc_rx_run_error;
unsigned int mmc_rx_jabber_error;

@ -196,7 +196,7 @@ void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc)
mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G);
mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G);
mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G);
mmc->mmc_rx_crc_errror += readl(ioaddr + MMC_RX_CRC_ERRROR);
mmc->mmc_rx_crc_error += readl(ioaddr + MMC_RX_CRC_ERRROR);
mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR);
mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR);
mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR);

@ -28,7 +28,7 @@

#include "stmmac.h"

static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
{
struct stmmac_priv *priv = (struct stmmac_priv *)p;
unsigned int txsize = priv->dma_tx_size;
@ -53,7 +53,10 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)

desc->des2 = dma_map_single(priv->device, skb->data,
bmax, DMA_TO_DEVICE);
priv->tx_skbuff_dma[entry] = desc->des2;
if (dma_mapping_error(priv->device, desc->des2))
return -1;

priv->tx_skbuff_dma[entry].buf = desc->des2;
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
STMMAC_RING_MODE);
@ -68,7 +71,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)

desc->des2 = dma_map_single(priv->device, skb->data + bmax,
len, DMA_TO_DEVICE);
priv->tx_skbuff_dma[entry] = desc->des2;
if (dma_mapping_error(priv->device, desc->des2))
return -1;
priv->tx_skbuff_dma[entry].buf = desc->des2;
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
STMMAC_RING_MODE);
@ -77,7 +82,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
} else {
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
priv->tx_skbuff_dma[entry] = desc->des2;
if (dma_mapping_error(priv->device, desc->des2))
return -1;
priv->tx_skbuff_dma[entry].buf = desc->des2;
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
STMMAC_RING_MODE);

@ -34,6 +34,11 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/reset.h>

struct stmmac_tx_info {
dma_addr_t buf;
bool map_as_page;
};

struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
@ -45,7 +50,7 @@ struct stmmac_priv {
u32 tx_count_frames;
u32 tx_coal_frames;
u32 tx_coal_timer;
dma_addr_t *tx_skbuff_dma;
struct stmmac_tx_info *tx_skbuff_dma;
dma_addr_t dma_tx_phy;
int tx_coalesce;
int hwts_tx_en;
@ -105,6 +110,8 @@ struct stmmac_priv {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_ops;
unsigned int default_addend;
struct clk *clk_ptp_ref;
unsigned int clk_ptp_rate;
u32 adv_ts;
int use_riwt;
int irq_wake;

@ -175,7 +175,7 @@ static const struct stmmac_stats stmmac_mmc[] = {
STMMAC_MMC_STAT(mmc_rx_octetcount_g),
STMMAC_MMC_STAT(mmc_rx_broadcastframe_g),
STMMAC_MMC_STAT(mmc_rx_multicastframe_g),
STMMAC_MMC_STAT(mmc_rx_crc_errror),
STMMAC_MMC_STAT(mmc_rx_crc_error),
STMMAC_MMC_STAT(mmc_rx_align_error),
STMMAC_MMC_STAT(mmc_rx_run_error),
STMMAC_MMC_STAT(mmc_rx_jabber_error),

@ -275,6 +275,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
*/
bool stmmac_eee_init(struct stmmac_priv *priv)
{
char *phy_bus_name = priv->plat->phy_bus_name;
bool ret = false;

/* Using PCS we cannot dial with the phy registers at this stage
@ -284,6 +285,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
(priv->pcs == STMMAC_PCS_RTBI))
goto out;

/* Never init EEE in case of a switch is attached */
if (phy_bus_name && (!strcmp(phy_bus_name, "fixed")))
goto out;

/* MAC core supports the EEE feature. */
if (priv->dma_cap.eee) {
int tx_lpi_timer = priv->tx_lpi_timer;
@ -316,10 +321,9 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
priv->hw->mac->set_eee_timer(priv->hw,
STMMAC_DEFAULT_LIT_LS,
tx_lpi_timer);
} else
/* Set HW EEE according to the speed */
priv->hw->mac->set_eee_pls(priv->hw,
priv->phydev->link);
}
/* Set HW EEE according to the speed */
priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link);

pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");

@ -603,16 +607,16 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
/* calculate default added value:
* formula is :
* addend = (2^32)/freq_div_ratio;
* where, freq_div_ratio = STMMAC_SYSCLOCK/50MHz
* hence, addend = ((2^32) * 50MHz)/STMMAC_SYSCLOCK;
* NOTE: STMMAC_SYSCLOCK should be >= 50MHz to
* where, freq_div_ratio = clk_ptp_ref_i/50MHz
* hence, addend = ((2^32) * 50MHz)/clk_ptp_ref_i;
* NOTE: clk_ptp_ref_i should be >= 50MHz to
* achive 20ns accuracy.
*
* 2^x * y == (y << x), hence
* 2^32 * 50000000 ==> (50000000 << 32)
*/
temp = (u64) (50000000ULL << 32);
priv->default_addend = div_u64(temp, STMMAC_SYSCLOCK);
priv->default_addend = div_u64(temp, priv->clk_ptp_rate);
priv->hw->ptp->config_addend(priv->ioaddr,
priv->default_addend);

@ -638,6 +642,16 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
return -EOPNOTSUPP;

/* Fall-back to main clock in case of no PTP ref is passed */
priv->clk_ptp_ref = devm_clk_get(priv->device, "clk_ptp_ref");
if (IS_ERR(priv->clk_ptp_ref)) {
priv->clk_ptp_rate = clk_get_rate(priv->stmmac_clk);
priv->clk_ptp_ref = NULL;
} else {
clk_prepare_enable(priv->clk_ptp_ref);
priv->clk_ptp_rate = clk_get_rate(priv->clk_ptp_ref);
}

priv->adv_ts = 0;
if (priv->dma_cap.atime_stamp && priv->extend_desc)
priv->adv_ts = 1;
@ -657,6 +671,8 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
if (priv->clk_ptp_ref)
clk_disable_unprepare(priv->clk_ptp_ref);
stmmac_ptp_unregister(priv);
}

@ -1061,7 +1077,8 @@ static int init_dma_desc_rings(struct net_device *dev)
else
p = priv->dma_tx + i;
p->des2 = 0;
priv->tx_skbuff_dma[i] = 0;
priv->tx_skbuff_dma[i].buf = 0;
priv->tx_skbuff_dma[i].map_as_page = false;
priv->tx_skbuff[i] = NULL;
}

@ -1100,17 +1117,24 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
else
p = priv->dma_tx + i;

if (priv->tx_skbuff_dma[i]) {
dma_unmap_single(priv->device,
priv->tx_skbuff_dma[i],
priv->hw->desc->get_tx_len(p),
DMA_TO_DEVICE);
priv->tx_skbuff_dma[i] = 0;
if (priv->tx_skbuff_dma[i].buf) {
if (priv->tx_skbuff_dma[i].map_as_page)
dma_unmap_page(priv->device,
priv->tx_skbuff_dma[i].buf,
priv->hw->desc->get_tx_len(p),
DMA_TO_DEVICE);
else
dma_unmap_single(priv->device,
priv->tx_skbuff_dma[i].buf,
priv->hw->desc->get_tx_len(p),
DMA_TO_DEVICE);
}

if (priv->tx_skbuff[i] != NULL) {
dev_kfree_skb_any(priv->tx_skbuff[i]);
priv->tx_skbuff[i] = NULL;
priv->tx_skbuff_dma[i].buf = 0;
priv->tx_skbuff_dma[i].map_as_page = false;
}
}
}
@ -1131,7 +1155,8 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv)
if (!priv->rx_skbuff)
goto err_rx_skbuff;

priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
priv->tx_skbuff_dma = kmalloc_array(txsize,
sizeof(*priv->tx_skbuff_dma),
GFP_KERNEL);
if (!priv->tx_skbuff_dma)
goto err_tx_skbuff_dma;
@ -1293,12 +1318,19 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
pr_debug("%s: curr %d, dirty %d\n", __func__,
priv->cur_tx, priv->dirty_tx);

if (likely(priv->tx_skbuff_dma[entry])) {
dma_unmap_single(priv->device,
priv->tx_skbuff_dma[entry],
priv->hw->desc->get_tx_len(p),
DMA_TO_DEVICE);
priv->tx_skbuff_dma[entry] = 0;
if (likely(priv->tx_skbuff_dma[entry].buf)) {
if (priv->tx_skbuff_dma[entry].map_as_page)
dma_unmap_page(priv->device,
priv->tx_skbuff_dma[entry].buf,
priv->hw->desc->get_tx_len(p),
DMA_TO_DEVICE);
else
dma_unmap_single(priv->device,
priv->tx_skbuff_dma[entry].buf,
priv->hw->desc->get_tx_len(p),
DMA_TO_DEVICE);
priv->tx_skbuff_dma[entry].buf = 0;
priv->tx_skbuff_dma[entry].map_as_page = false;
}
priv->hw->mode->clean_desc3(priv, p);

@ -1637,6 +1669,13 @@ static int stmmac_hw_setup(struct net_device *dev)
/* Initialize the MAC Core */
priv->hw->mac->core_init(priv->hw, dev->mtu);

ret = priv->hw->mac->rx_ipc(priv->hw);
if (!ret) {
pr_warn(" RX IPC Checksum Offload disabled\n");
priv->plat->rx_coe = STMMAC_RX_COE_NONE;
priv->hw->rx_csum = 0;
}

/* Enable the MAC Rx/Tx */
stmmac_set_mac(priv->ioaddr, true);

@ -1887,12 +1926,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (likely(!is_jumbo)) {
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
priv->tx_skbuff_dma[entry] = desc->des2;
if (dma_mapping_error(priv->device, desc->des2))
goto dma_map_err;
priv->tx_skbuff_dma[entry].buf = desc->des2;
priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
csum_insertion, priv->mode);
} else {
desc = first;
entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
if (unlikely(entry < 0))
goto dma_map_err;
}

for (i = 0; i < nfrags; i++) {
@ -1908,7 +1951,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)

desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
DMA_TO_DEVICE);
priv->tx_skbuff_dma[entry] = desc->des2;
if (dma_mapping_error(priv->device, desc->des2))
goto dma_map_err; /* should reuse desc w/o issues */

priv->tx_skbuff_dma[entry].buf = desc->des2;
priv->tx_skbuff_dma[entry].map_as_page = true;
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
priv->mode);
wmb();
@ -1975,7 +2022,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
priv->hw->dma->enable_dma_transmission(priv->ioaddr);

spin_unlock(&priv->tx_lock);
return NETDEV_TX_OK;

dma_map_err:
dev_err(priv->device, "Tx dma map failed\n");
dev_kfree_skb(skb);
priv->dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}

@ -2028,7 +2080,12 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
priv->rx_skbuff_dma[entry] =
dma_map_single(priv->device, skb->data, bfsize,
DMA_FROM_DEVICE);

if (dma_mapping_error(priv->device,
priv->rx_skbuff_dma[entry])) {
dev_err(priv->device, "Rx dma map failed\n");
dev_kfree_skb(skb);
break;
}
p->des2 = priv->rx_skbuff_dma[entry];

priv->hw->mode->refill_desc3(priv, p);
@ -2055,7 +2112,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
unsigned int entry = priv->cur_rx % rxsize;
unsigned int next_entry;
unsigned int count = 0;
int coe = priv->plat->rx_coe;
int coe = priv->hw->rx_csum;

if (netif_msg_rx_status(priv)) {
pr_debug("%s: descriptor ring:\n", __func__);
@ -2276,8 +2333,7 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev,

if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
features &= ~NETIF_F_RXCSUM;
else if (priv->plat->rx_coe == STMMAC_RX_COE_TYPE1)
features &= ~NETIF_F_IPV6_CSUM;

if (!priv->plat->tx_coe)
features &= ~NETIF_F_ALL_CSUM;

@ -2292,6 +2348,24 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev,
return features;
}

static int stmmac_set_features(struct net_device *netdev,
netdev_features_t features)
{
struct stmmac_priv *priv = netdev_priv(netdev);

/* Keep the COE Type in case of csum is supporting */
if (features & NETIF_F_RXCSUM)
priv->hw->rx_csum = priv->plat->rx_coe;
else
priv->hw->rx_csum = 0;
/* No check needed because rx_coe has been set before and it will be
* fixed in case of issue.
*/
priv->hw->mac->rx_ipc(priv->hw);

return 0;
}

/**
* stmmac_interrupt - main ISR
* @irq: interrupt number.
@ -2572,6 +2646,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_stop = stmmac_release,
.ndo_change_mtu = stmmac_change_mtu,
.ndo_fix_features = stmmac_fix_features,
.ndo_set_features = stmmac_set_features,
.ndo_set_rx_mode = stmmac_set_rx_mode,
.ndo_tx_timeout = stmmac_tx_timeout,
.ndo_do_ioctl = stmmac_ioctl,
@ -2592,7 +2667,6 @@ static const struct net_device_ops stmmac_netdev_ops = {
*/
static int stmmac_hw_init(struct stmmac_priv *priv)
{
int ret;
struct mac_device_info *mac;

/* Identify the MAC HW device */
@ -2649,15 +2723,11 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
/* To use alternate (extended) or normal descriptor structures */
stmmac_selec_desc_mode(priv);

ret = priv->hw->mac->rx_ipc(priv->hw);
if (!ret) {
pr_warn(" RX IPC Checksum Offload not configured.\n");
priv->plat->rx_coe = STMMAC_RX_COE_NONE;
}

if (priv->plat->rx_coe)
if (priv->plat->rx_coe) {
priv->hw->rx_csum = priv->plat->rx_coe;
pr_info(" RX Checksum Offload Engine supported (type %d)\n",
priv->plat->rx_coe);
}
if (priv->plat->tx_coe)
pr_info(" TX Checksum insertion supported\n");

@ -206,6 +206,7 @@ void stmmac_ptp_unregister(struct stmmac_priv *priv)
{
if (priv->ptp_clock) {
ptp_clock_unregister(priv->ptp_clock);
priv->ptp_clock = NULL;
pr_debug("Removed PTP HW clock successfully on %s\n",
priv->dev->name);
}

@ -25,8 +25,6 @@
#ifndef __STMMAC_PTP_H__
#define __STMMAC_PTP_H__

#define STMMAC_SYSCLOCK 62500000

/* IEEE 1588 PTP register offsets */
#define PTP_TCR 0x0700 /* Timestamp Control Reg */
#define PTP_SSIR 0x0704 /* Sub-Second Increment Reg */

@ -147,11 +147,6 @@
#define PCI_MEM64BIT (2<<1) /* Base addr anywhere in 64 Bit range */
#define PCI_MEMSPACE 0x00000001L /* Bit 0: Memory Space Indic. */

/* PCI_BASE_2ND 32 bit 2nd Base address */
#define PCI_IOBASE 0xffffff00L /* Bit 31..8: I/O Base address */
#define PCI_IOSIZE 0x000000fcL /* Bit 7..2: I/O Size Requirements */
#define PCI_IOSPACE 0x00000001L /* Bit 0: I/O Space Indicator */

/* PCI_SUB_VID 16 bit Subsystem Vendor ID */
/* PCI_SUB_ID 16 bit Subsystem ID */

@ -1036,31 +1036,31 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
/* First check if the EEE ability is supported */
eee_cap = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE,
MDIO_MMD_PCS, phydev->addr);
if (eee_cap < 0)
return eee_cap;
if (eee_cap <= 0)
goto eee_exit_err;

cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
if (!cap)
return -EPROTONOSUPPORT;
goto eee_exit_err;

/* Check which link settings negotiated and verify it in
* the EEE advertising registers.
*/
eee_lp = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE,
MDIO_MMD_AN, phydev->addr);
if (eee_lp < 0)
return eee_lp;
if (eee_lp <= 0)
goto eee_exit_err;

eee_adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
MDIO_MMD_AN, phydev->addr);
if (eee_adv < 0)
return eee_adv;
if (eee_adv <= 0)
goto eee_exit_err;

adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
idx = phy_find_setting(phydev->speed, phydev->duplex);
if (!(lp & adv & settings[idx].setting))
return -EPROTONOSUPPORT;
goto eee_exit_err;

if (clk_stop_enable) {
/* Configure the PHY to stop receiving xMII
@ -1080,7 +1080,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)

return 0; /* EEE supported */
}

eee_exit_err:
return -EPROTONOSUPPORT;
}
EXPORT_SYMBOL(phy_init_eee);

@ -2056,7 +2056,6 @@ vmxnet3_set_mc(struct net_device *netdev)
if (!netdev_mc_empty(netdev)) {
new_table = vmxnet3_copy_mc(netdev);
if (new_table) {
new_mode |= VMXNET3_RXM_MCAST;
rxConf->mfTableLen = cpu_to_le16(
netdev_mc_count(netdev) * ETH_ALEN);
new_table_pa = dma_map_single(
@ -2064,15 +2063,18 @@ vmxnet3_set_mc(struct net_device *netdev)
new_table,
rxConf->mfTableLen,
PCI_DMA_TODEVICE);
}

if (new_table_pa) {
new_mode |= VMXNET3_RXM_MCAST;
rxConf->mfTablePA = cpu_to_le64(new_table_pa);
} else {
netdev_info(netdev, "failed to copy mcast list"
", setting ALL_MULTI\n");
netdev_info(netdev,
"failed to copy mcast list, setting ALL_MULTI\n");
new_mode |= VMXNET3_RXM_ALL_MULTI;
}
}


if (!(new_mode & VMXNET3_RXM_MCAST)) {
rxConf->mfTableLen = 0;
rxConf->mfTablePA = 0;
@ -2091,11 +2093,10 @@ vmxnet3_set_mc(struct net_device *netdev)
VMXNET3_CMD_UPDATE_MAC_FILTERS);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);

if (new_table) {
if (new_table_pa)
dma_unmap_single(&adapter->pdev->dev, new_table_pa,
rxConf->mfTableLen, PCI_DMA_TODEVICE);
kfree(new_table);
}
kfree(new_table);
}

void

@ -69,10 +69,10 @@
/*
* Version numbers
*/
#define VMXNET3_DRIVER_VERSION_STRING "1.2.0.0-k"
#define VMXNET3_DRIVER_VERSION_STRING "1.2.1.0-k"

/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
#define VMXNET3_DRIVER_VERSION_NUM 0x01020000
#define VMXNET3_DRIVER_VERSION_NUM 0x01020100

#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */

@ -1327,7 +1327,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
} else if (vxlan->flags & VXLAN_F_L3MISS) {
union vxlan_addr ipa = {
.sin.sin_addr.s_addr = tip,
.sa.sa_family = AF_INET,
.sin.sin_family = AF_INET,
};

vxlan_ip_miss(dev, &ipa);
@ -1488,7 +1488,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
} else if (vxlan->flags & VXLAN_F_L3MISS) {
union vxlan_addr ipa = {
.sin6.sin6_addr = msg->target,
.sa.sa_family = AF_INET6,
.sin6.sin6_family = AF_INET6,
};

vxlan_ip_miss(dev, &ipa);
@ -1521,7 +1521,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
union vxlan_addr ipa = {
.sin.sin_addr.s_addr = pip->daddr,
.sa.sa_family = AF_INET,
.sin.sin_family = AF_INET,
};

vxlan_ip_miss(dev, &ipa);
@ -1542,7 +1542,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
union vxlan_addr ipa = {
.sin6.sin6_addr = pip6->daddr,
.sa.sa_family = AF_INET6,
.sin6.sin6_family = AF_INET6,
};

vxlan_ip_miss(dev, &ipa);

@ -2423,8 +2423,6 @@ static void at76_delete_device(struct at76_priv *priv)

kfree_skb(priv->rx_skb);

usb_put_dev(priv->udev);

at76_dbg(DBG_PROC_ENTRY, "%s: before freeing priv/ieee80211_hw",
__func__);
ieee80211_free_hw(priv->hw);
@ -2558,6 +2556,7 @@ static void at76_disconnect(struct usb_interface *interface)

wiphy_info(priv->hw->wiphy, "disconnecting\n");
at76_delete_device(priv);
usb_put_dev(priv->udev);
dev_info(&interface->dev, "disconnected\n");
}

@ -253,7 +253,7 @@ static ssize_t write_file_spec_scan_ctl(struct file *file,

if (strncmp("trigger", buf, 7) == 0) {
ath9k_spectral_scan_trigger(sc->hw);
} else if (strncmp("background", buf, 9) == 0) {
} else if (strncmp("background", buf, 10) == 0) {
ath9k_spectral_scan_config(sc->hw, SPECTRAL_BACKGROUND);
ath_dbg(common, CONFIG, "spectral scan: background mode enabled\n");
} else if (strncmp("chanscan", buf, 8) == 0) {

@ -51,7 +51,6 @@ config IWLWIFI_LEDS

config IWLDVM
tristate "Intel Wireless WiFi DVM Firmware support"
depends on m
default IWLWIFI
help
This is the driver that supports the DVM firmware which is
@ -60,7 +59,6 @@ config IWLDVM

config IWLMVM
tristate "Intel Wireless WiFi MVM Firmware support"
depends on m
help
This is the driver that supports the MVM firmware which is
currently only available for 7260 and 3160 devices.

@ -1068,6 +1068,13 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
/* recalculate basic rates */
iwl_calc_basic_rates(priv, ctx);

/*
* force CTS-to-self frames protection if RTS-CTS is not preferred
* one aggregation protection method
*/
if (!priv->hw_params.use_rts_for_aggregation)
ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;

if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
@ -1473,6 +1480,11 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
else
ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;

if (bss_conf->use_cts_prot)
ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
else
ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;

memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);

if (vif->type == NL80211_IFTYPE_AP ||

@ -67,8 +67,8 @@
#include "iwl-agn-hw.h"

/* Highest firmware API version supported */
#define IWL7260_UCODE_API_MAX 9
#define IWL3160_UCODE_API_MAX 9
#define IWL7260_UCODE_API_MAX 10
#define IWL3160_UCODE_API_MAX 10

/* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 9

@ -67,7 +67,7 @@
#include "iwl-agn-hw.h"

/* Highest firmware API version supported */
#define IWL8000_UCODE_API_MAX 9
#define IWL8000_UCODE_API_MAX 10

/* Oldest version we won't warn about */
#define IWL8000_UCODE_API_OK 8

@ -101,7 +101,7 @@ static bool halbtc_legacy(struct rtl_priv *adapter)

bool is_legacy = false;

if ((mac->mode == WIRELESS_MODE_B) || (mac->mode == WIRELESS_MODE_B))
if ((mac->mode == WIRELESS_MODE_B) || (mac->mode == WIRELESS_MODE_G))
is_legacy = true;

return is_legacy;

@ -317,6 +317,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
{RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
{RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
{RTL_USB_DEVICE(0x0df6, 0x0070, rtl92cu_hal_cfg)}, /*Sitecom - 150N */
{RTL_USB_DEVICE(0x0df6, 0x0077, rtl92cu_hal_cfg)}, /*Sitecom-WLA2100V2*/
{RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
{RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/

@ -576,6 +576,9 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
init_waitqueue_head(&queue->dealloc_wq);
atomic_set(&queue->inflight_packets, 0);

netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
XENVIF_NAPI_WEIGHT);

if (tx_evtchn == rx_evtchn) {
/* feature-split-event-channels == 0 */
err = bind_interdomain_evtchn_to_irqhandler(
@ -629,9 +632,6 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
wake_up_process(queue->task);
wake_up_process(queue->dealloc_task);

netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
XENVIF_NAPI_WEIGHT);

return 0;

err_rx_unbind:

@ -889,6 +889,7 @@ extern const struct attribute_group *qeth_generic_attr_groups[];
extern const struct attribute_group *qeth_osn_attr_groups[];
extern struct workqueue_struct *qeth_wq;

int qeth_card_hw_is_reachable(struct qeth_card *);
const char *qeth_get_cardname_short(struct qeth_card *);
int qeth_realloc_buffer_pool(struct qeth_card *, int);
int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id);

@ -73,6 +73,13 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
struct workqueue_struct *qeth_wq;
EXPORT_SYMBOL_GPL(qeth_wq);

int qeth_card_hw_is_reachable(struct qeth_card *card)
{
return (card->state == CARD_STATE_SOFTSETUP) ||
(card->state == CARD_STATE_UP);
}
EXPORT_SYMBOL_GPL(qeth_card_hw_is_reachable);

static void qeth_close_dev_handler(struct work_struct *work)
{
struct qeth_card *card;
@ -5790,6 +5797,7 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev,
struct qeth_card *card = netdev->ml_priv;
enum qeth_link_types link_type;
struct carrier_info carrier_info;
int rc;
u32 speed;

if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan))
@ -5832,8 +5840,14 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev,
/* Check if we can obtain more accurate information. */
/* If QUERY_CARD_INFO command is not supported or fails, */
/* just return the heuristics that was filled above. */
if (qeth_query_card_info(card, &carrier_info) != 0)
if (!qeth_card_hw_is_reachable(card))
return -ENODEV;
rc = qeth_query_card_info(card, &carrier_info);
if (rc == -EOPNOTSUPP) /* for old hardware, return heuristic */
return 0;
if (rc) /* report error from the hardware operation */
return rc;
/* on success, fill in the information got from the hardware */

netdev_dbg(netdev,
"card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",

@ -5,17 +5,12 @@

#include <linux/slab.h>
#include <asm/ebcdic.h>
#include "qeth_core.h"
#include "qeth_l2.h"

#define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \
struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store)

static int qeth_card_hw_is_reachable(struct qeth_card *card)
{
return (card->state == CARD_STATE_SOFTSETUP) ||
(card->state == CARD_STATE_UP);
}

static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
struct device_attribute *attr, char *buf,
int show_state)

@ -38,6 +38,7 @@ static const struct pci_device_id b43_pci_bridge_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432b) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432c) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4350) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4351) },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, b43_pci_bridge_tbl);

@ -1196,6 +1196,9 @@ int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
enum mlx4_net_trans_rule_id id);
int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id);

int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
int port, int qpn, u16 prio, u64 *reg_id);

void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
int i, int val);

@ -3176,7 +3176,7 @@ static inline int __dev_uc_sync(struct net_device *dev,
|
||||
}
|
||||
|
||||
/**
|
||||
* __dev_uc_unsync - Remove synchonized addresses from device
|
||||
* __dev_uc_unsync - Remove synchronized addresses from device
|
||||
* @dev: device to sync
|
||||
* @unsync: function to call if address should be removed
|
||||
*
|
||||
@ -3220,7 +3220,7 @@ static inline int __dev_mc_sync(struct net_device *dev,
|
||||
}
|
||||
|
||||
/**
|
||||
* __dev_mc_unsync - Remove synchonized addresses from device
|
||||
* __dev_mc_unsync - Remove synchronized addresses from device
|
||||
* @dev: device to sync
|
||||
* @unsync: function to call if address should be removed
|
||||
*
|
||||
|
@ -9,6 +9,7 @@
|
||||
#include <linux/in6.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/static_key.h>
|
||||
#include <uapi/linux/netfilter.h>
|
||||
#ifdef CONFIG_NETFILTER
|
||||
static inline int NF_DROP_GETERR(int verdict)
|
||||
@ -99,9 +100,9 @@ void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
|
||||
|
||||
extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
|
||||
|
||||
#if defined(CONFIG_JUMP_LABEL)
|
||||
#include <linux/static_key.h>
|
||||
#ifdef HAVE_JUMP_LABEL
|
||||
extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
|
||||
|
||||
static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
|
||||
{
|
||||
if (__builtin_constant_p(pf) &&
|
||||
|
@ -464,6 +464,8 @@ struct hci_conn_params {
|
||||
HCI_AUTO_CONN_ALWAYS,
|
||||
HCI_AUTO_CONN_LINK_LOSS,
|
||||
} auto_connect;
|
||||
|
||||
struct hci_conn *conn;
|
||||
};
|
||||
|
||||
extern struct list_head hci_dev_list;
|
||||
|
@ -16,7 +16,6 @@ struct netns_sysctl_lowpan {
|
||||
struct netns_ieee802154_lowpan {
|
||||
struct netns_sysctl_lowpan sysctl;
|
||||
struct netns_frags frags;
|
||||
int max_dsize;
|
||||
};
|
||||
|
||||
#endif
|
||||
|
@ -167,7 +167,7 @@ struct ieee80211_reg_rule {
|
||||
struct ieee80211_regdomain {
|
||||
struct rcu_head rcu_head;
|
||||
u32 n_reg_rules;
|
||||
char alpha2[2];
|
||||
char alpha2[3];
|
||||
enum nl80211_dfs_regions dfs_region;
|
||||
struct ieee80211_reg_rule reg_rules[];
|
||||
};
|
||||
|
@ -320,6 +320,19 @@ static inline sctp_assoc_t sctp_assoc2id(const struct sctp_association *asoc)
return asoc ? asoc->assoc_id : 0;
}

static inline enum sctp_sstat_state
sctp_assoc_to_state(const struct sctp_association *asoc)
{
/* SCTP's uapi always had SCTP_EMPTY(=0) as a dummy state, but we
* got rid of it in kernel space. Therefore SCTP_CLOSED et al
* start at =1 in user space, but actually as =0 in kernel space.
* Now that we can not break user space and SCTP_EMPTY is exposed
* there, we need to fix it up with an ugly offset not to break
* applications. :(
*/
return asoc->state + 1;
}

/* Look up the association by its id. */
struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id);
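The comment in the hunk above documents an off-by-one between the kernel's internal state enum and the uapi one. A minimal userspace sketch of that mapping follows; the enum names and values here are illustrative stand-ins, not the kernel's authoritative definitions.

#include <assert.h>

/* Hypothetical stand-ins for the two enums: the kernel enum starts
 * counting real states at 0, while the uapi enum reserves 0 for the
 * dummy SCTP_EMPTY, so every real state is shifted up by one. */
enum kern_state { KERN_CLOSED = 0, KERN_ESTABLISHED = 4 };
enum uapi_state { UAPI_EMPTY = 0, UAPI_CLOSED = 1, UAPI_ESTABLISHED = 5 };

/* Same shape as sctp_assoc_to_state(): bridge the two numbering
 * schemes with a +1 offset instead of breaking user space. */
static enum uapi_state to_uapi(enum kern_state s)
{
	return (enum uapi_state)(s + 1);
}

int main(void)
{
	assert(to_uapi(KERN_CLOSED) == UAPI_CLOSED);
	assert(to_uapi(KERN_ESTABLISHED) == UAPI_ESTABLISHED);
	return 0;
}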
@ -2165,9 +2165,7 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
*/
if (sock_flag(sk, SOCK_RCVTSTAMP) ||
(sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
(kt.tv64 &&
(sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE ||
skb_shinfo(skb)->tx_flags & SKBTX_ANY_SW_TSTAMP)) ||
(kt.tv64 && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
(hwtstamps->hwtstamp.tv64 &&
(sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
__sock_recv_timestamp(msg, sk, skb);
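The rewritten condition above stops reporting a software timestamp merely because the skb had been flagged for timestamp generation; reporting now requires the socket's SOF_TIMESTAMPING_SOFTWARE bit. A compilable model of the before/after predicate, with illustrative flag values rather than the real uapi constants:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative flag bits, not the real uapi values. */
#define TS_SOFTWARE 0x2 /* "report software timestamps" */

/* Old logic: a timestamp leaked out whenever the skb had been marked
 * for sw timestamp generation, even without the reporting bit. */
static bool report_old(unsigned tsflags, bool have_sw_ts, bool skb_gen_flag)
{
	return have_sw_ts && ((tsflags & TS_SOFTWARE) || skb_gen_flag);
}

/* New logic: only the socket's reporting bit matters. */
static bool report_new(unsigned tsflags, bool have_sw_ts)
{
	return have_sw_ts && (tsflags & TS_SOFTWARE);
}

int main(void)
{
	/* Generation requested on the skb, but no reporting bit set: */
	printf("old: %d, new: %d\n",
	       report_old(0, true, true), /* 1: leaked */
	       report_new(0, true));      /* 0: suppressed */
	return 0;
}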
@ -290,7 +290,7 @@ struct wimax_dev;
* This operation has to be synchronous, and return only when the
* reset is complete. In case of having had to resort to bus/cold
* reset implying a device disconnection, the call is allowed to
* return inmediately.
* return immediately.
* NOTE: wimax_dev->mutex is NOT locked when this op is being
* called; however, wimax_dev->mutex_reset IS locked to ensure
* serialization of calls to wimax_reset().

@ -107,7 +107,7 @@ DECLARE_EVENT_CLASS(softirq,
* @vec_nr: softirq vector number
*
* When used in combination with the softirq_exit tracepoint
* we can determine the softirq handler runtine.
* we can determine the softirq handler routine.
*/
DEFINE_EVENT(softirq, softirq_entry,

@ -121,7 +121,7 @@ DEFINE_EVENT(softirq, softirq_entry,
* @vec_nr: softirq vector number
*
* When used in combination with the softirq_entry tracepoint
* we can determine the softirq handler runtine.
* we can determine the softirq handler routine.
*/
DEFINE_EVENT(softirq, softirq_exit,
@ -589,6 +589,14 @@ EXPORT_SYMBOL(hci_get_route);
void hci_le_conn_failed(struct hci_conn *conn, u8 status)
{
struct hci_dev *hdev = conn->hdev;
struct hci_conn_params *params;

params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
conn->dst_type);
if (params && params->conn) {
hci_conn_drop(params->conn);
params->conn = NULL;
}

conn->state = BT_CLOSED;

@ -2536,8 +2536,13 @@ static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
struct hci_conn_params *p;

list_for_each_entry(p, &hdev->le_conn_params, list)
list_for_each_entry(p, &hdev->le_conn_params, list) {
if (p->conn) {
hci_conn_drop(p->conn);
p->conn = NULL;
}
list_del_init(&p->action);
}

BT_DBG("All LE pending actions cleared");
}

@ -2578,8 +2583,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)

hci_dev_lock(hdev);
hci_inquiry_cache_flush(hdev);
hci_conn_hash_flush(hdev);
hci_pend_le_actions_clear(hdev);
hci_conn_hash_flush(hdev);
hci_dev_unlock(hdev);

hci_notify(hdev, HCI_DEV_DOWN);
@ -3727,6 +3732,9 @@ void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
if (!params)
return;

if (params->conn)
hci_conn_drop(params->conn);

list_del(&params->action);
list_del(&params->list);
kfree(params);

@ -3757,6 +3765,8 @@ void hci_conn_params_clear_all(struct hci_dev *hdev)
struct hci_conn_params *params, *tmp;

list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
if (params->conn)
hci_conn_drop(params->conn);
list_del(&params->action);
list_del(&params->list);
kfree(params);
@ -4221,8 +4221,13 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_proto_connect_cfm(conn, ev->status);

params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
if (params)
if (params) {
list_del_init(&params->action);
if (params->conn) {
hci_conn_drop(params->conn);
params->conn = NULL;
}
}

unlock:
hci_update_background_scan(hdev);
@ -4304,8 +4309,16 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,

conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
if (!IS_ERR(conn))
if (!IS_ERR(conn)) {
/* Store the pointer since we don't really have any
* other owner of the object besides the params that
* triggered it. This way we can abort the connection if
* the parameters get removed and keep the reference
* count consistent once the connection is established.
*/
params->conn = conn;
return;
}

switch (PTR_ERR(conn)) {
case -EBUSY:
@ -775,7 +775,7 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb)
EXPORT_SYMBOL(__skb_checksum_complete);

/**
* skb_copy_and_csum_datagram_iovec - Copy and checkum skb to user iovec.
* skb_copy_and_csum_datagram_iovec - Copy and checksum skb to user iovec.
* @skb: skbuff
* @hlen: hardware length
* @iov: io vector
@ -2587,13 +2587,19 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
return harmonize_features(skb, features);
}

features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX);
features = netdev_intersect_features(features,
skb->dev->vlan_features |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX);

if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX;
features = netdev_intersect_features(features,
NETIF_F_SG |
NETIF_F_HIGHDMA |
NETIF_F_FRAGLIST |
NETIF_F_GEN_CSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX);

return harmonize_features(skb, features);
}
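Replacing the plain `&` with netdev_intersect_features() matters because checksum capabilities are not flat bits: a device advertising a generic checksum feature should intersect as compatible with a more specific one. A rough userspace model of that idea follows; the bit values and the widening rule are assumptions for illustration, not the kernel's exact implementation.

#include <stdio.h>

typedef unsigned long long features_t;

/* Illustrative feature bits. */
#define F_IP_CSUM   (1ULL << 0)
#define F_IPV6_CSUM (1ULL << 1)
#define F_GEN_CSUM  (1ULL << 2) /* "can checksum anything" */
#define F_ALL_CSUM  (F_IP_CSUM | F_IPV6_CSUM | F_GEN_CSUM)

/* Sketch of the intersect idea: before AND-ing, widen a generic
 * checksum capability into the specific ones it implies. */
static features_t intersect(features_t f1, features_t f2)
{
	if (f1 & F_GEN_CSUM)
		f1 |= F_ALL_CSUM;
	if (f2 & F_GEN_CSUM)
		f2 |= F_ALL_CSUM;
	return f1 & f2;
}

int main(void)
{
	features_t dev = F_GEN_CSUM, vlan = F_IP_CSUM;

	/* Plain AND loses the checksum capability entirely... */
	printf("plain: %#llx\n", dev & vlan);               /* 0 */
	/* ...while the intersect keeps the compatible subset. */
	printf("intersect: %#llx\n", intersect(dev, vlan)); /* F_IP_CSUM */
	return 0;
}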
@ -4889,7 +4895,8 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev,
if (adj->master)
sysfs_remove_link(&(dev->dev.kobj), "master");

if (netdev_adjacent_is_neigh_list(dev, dev_list))
if (netdev_adjacent_is_neigh_list(dev, dev_list) &&
net_eq(dev_net(dev),dev_net(adj_dev)))
netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);

list_del_rcu(&adj->list);
@ -5159,11 +5166,65 @@ void netdev_upper_dev_unlink(struct net_device *dev,
}
EXPORT_SYMBOL(netdev_upper_dev_unlink);

void netdev_adjacent_add_links(struct net_device *dev)
{
struct netdev_adjacent *iter;

struct net *net = dev_net(dev);

list_for_each_entry(iter, &dev->adj_list.upper, list) {
if (!net_eq(net,dev_net(iter->dev)))
continue;
netdev_adjacent_sysfs_add(iter->dev, dev,
&iter->dev->adj_list.lower);
netdev_adjacent_sysfs_add(dev, iter->dev,
&dev->adj_list.upper);
}

list_for_each_entry(iter, &dev->adj_list.lower, list) {
if (!net_eq(net,dev_net(iter->dev)))
continue;
netdev_adjacent_sysfs_add(iter->dev, dev,
&iter->dev->adj_list.upper);
netdev_adjacent_sysfs_add(dev, iter->dev,
&dev->adj_list.lower);
}
}

void netdev_adjacent_del_links(struct net_device *dev)
{
struct netdev_adjacent *iter;

struct net *net = dev_net(dev);

list_for_each_entry(iter, &dev->adj_list.upper, list) {
if (!net_eq(net,dev_net(iter->dev)))
continue;
netdev_adjacent_sysfs_del(iter->dev, dev->name,
&iter->dev->adj_list.lower);
netdev_adjacent_sysfs_del(dev, iter->dev->name,
&dev->adj_list.upper);
}

list_for_each_entry(iter, &dev->adj_list.lower, list) {
if (!net_eq(net,dev_net(iter->dev)))
continue;
netdev_adjacent_sysfs_del(iter->dev, dev->name,
&iter->dev->adj_list.upper);
netdev_adjacent_sysfs_del(dev, iter->dev->name,
&dev->adj_list.lower);
}
}

void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
{
struct netdev_adjacent *iter;

struct net *net = dev_net(dev);

list_for_each_entry(iter, &dev->adj_list.upper, list) {
if (!net_eq(net,dev_net(iter->dev)))
continue;
netdev_adjacent_sysfs_del(iter->dev, oldname,
&iter->dev->adj_list.lower);
netdev_adjacent_sysfs_add(iter->dev, dev,

@ -5171,6 +5232,8 @@ void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
}

list_for_each_entry(iter, &dev->adj_list.lower, list) {
if (!net_eq(net,dev_net(iter->dev)))
continue;
netdev_adjacent_sysfs_del(iter->dev, oldname,
&iter->dev->adj_list.upper);
netdev_adjacent_sysfs_add(iter->dev, dev,
@ -6773,6 +6836,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char

/* Send a netdev-removed uevent to the old namespace */
kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
netdev_adjacent_del_links(dev);

/* Actually switch the network namespace */
dev_net_set(dev, net);

@ -6787,6 +6851,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char

/* Send a netdev-add uevent to the new namespace */
kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
netdev_adjacent_add_links(dev);

/* Fixup kobjects */
err = device_rename(&dev->dev, dev->name);
@ -197,7 +197,7 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats
* as destination. A new timer with the interval specified in the
* configuration TLV is created. Upon each interval, the latest statistics
* will be read from &bstats and the estimated rate will be stored in
* &rate_est with the statistics lock grabed during this period.
* &rate_est with the statistics lock grabbed during this period.
*
* Returns 0 on success or a negative error code.
*

@ -206,7 +206,7 @@ EXPORT_SYMBOL(gnet_stats_copy_queue);
* @st: application specific statistics data
* @len: length of data
*
* Appends the application sepecific statistics to the top level TLV created by
* Appends the application specific statistics to the top level TLV created by
* gnet_stats_start_copy() and remembers the data for XSTATS if the dumping
* handle is in backward compatibility mode.
*

@ -2647,7 +2647,7 @@ EXPORT_SYMBOL(skb_prepare_seq_read);
* skb_seq_read() will return the remaining part of the block.
*
* Note 1: The size of each block of data returned can be arbitrary,
* this limitation is the cost for zerocopy seqeuental
* this limitation is the cost for zerocopy sequential
* reads of potentially non linear data.
*
* Note 2: Fragment lists within fragments are not implemented

@ -2781,7 +2781,7 @@ EXPORT_SYMBOL(skb_find_text);
/**
* skb_append_datato_frags - append the user data to a skb
* @sk: sock structure
* @skb: skb structure to be appened with user data.
* @skb: skb structure to be appended with user data.
* @getfrag: call back function to be used for getting the user data
* @from: pointer to user message iov
* @length: length of the iov message

@ -166,7 +166,7 @@ EXPORT_SYMBOL(sk_ns_capable);
/**
* sk_capable - Socket global capability test
* @sk: Socket to use a capability on or through
* @cap: The global capbility to use
* @cap: The global capability to use
*
* Test to see if the opener of the socket had when the socket was
* created and the current process has the capability @cap in all user

@ -183,7 +183,7 @@ EXPORT_SYMBOL(sk_capable);
* @sk: Socket to use a capability on or through
* @cap: The capability to use
*
* Test to see if the opener of the socket had when the socke was created
* Test to see if the opener of the socket had when the socket was created
* and the current process has the capability @cap over the network namespace
* the socket is a member of.
*/
@ -1822,6 +1822,9 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
order);
if (page)
goto fill_page;
/* Do not retry other high order allocations */
order = 1;
max_page_order = 0;
}
order--;
}
@ -1869,10 +1872,8 @@ EXPORT_SYMBOL(sock_alloc_send_skb);
* no guarantee that allocations succeed. Therefore, @sz MUST be
* less or equal than PAGE_SIZE.
*/
bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
{
int order;

if (pfrag->page) {
if (atomic_read(&pfrag->page->_count) == 1) {
pfrag->offset = 0;

@ -1883,20 +1884,21 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
put_page(pfrag->page);
}

order = SKB_FRAG_PAGE_ORDER;
do {
gfp_t gfp = prio;

if (order)
gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
pfrag->page = alloc_pages(gfp, order);
pfrag->offset = 0;
if (SKB_FRAG_PAGE_ORDER) {
pfrag->page = alloc_pages(gfp | __GFP_COMP |
__GFP_NOWARN | __GFP_NORETRY,
SKB_FRAG_PAGE_ORDER);
if (likely(pfrag->page)) {
pfrag->offset = 0;
pfrag->size = PAGE_SIZE << order;
pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
return true;
}
} while (--order >= 0);

}
pfrag->page = alloc_page(gfp);
if (likely(pfrag->page)) {
pfrag->size = PAGE_SIZE;
return true;
}
return false;
}
EXPORT_SYMBOL(skb_page_frag_refill);
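The rewrite above drops the old order-by-order retry loop: one attempt at a high-order compound page with "cheap" flags (no warning, no retry), then straight to a single order-0 page on failure. A userspace sketch of that policy; the allocator shim below is a stand-in for alloc_pages(), not the kernel API.

#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>

#define PAGE_SIZE 4096u
#define FRAG_PAGE_ORDER 3 /* try a 32 KiB compound page first */

struct frag { void *page; unsigned size; };

/* Stand-in for alloc_pages(); fail_high simulates memory pressure
 * that defeats any high-order request. */
static void *fake_alloc(unsigned order, bool fail_high)
{
	if (order > 0 && fail_high)
		return NULL;
	return malloc(PAGE_SIZE << order);
}

/* One high-order attempt, then fall back directly to order 0 --
 * no intermediate orders, mirroring the new skb_page_frag_refill(). */
static bool refill(struct frag *f, bool fail_high)
{
	f->page = fake_alloc(FRAG_PAGE_ORDER, fail_high);
	if (f->page) {
		f->size = PAGE_SIZE << FRAG_PAGE_ORDER;
		return true;
	}
	f->page = fake_alloc(0, fail_high);
	if (f->page) {
		f->size = PAGE_SIZE;
		return true;
	}
	return false;
}

int main(void)
{
	struct frag f = { 0 };
	if (refill(&f, true))                 /* high-order attempt fails */
		printf("got %u bytes\n", f.size); /* 4096 */
	free(f.page);
	return 0;
}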
@ -246,7 +246,7 @@ lowpan_alloc_frag(struct sk_buff *skb, int size,
return ERR_PTR(-rc);
}
} else {
frag = ERR_PTR(ENOMEM);
frag = ERR_PTR(-ENOMEM);
}

return frag;
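The one-character fix above matters because the ERR_PTR convention encodes a negative errno in the pointer value; a positive ENOMEM yields a small positive pointer that IS_ERR() will not recognize. A self-contained model of the convention (the errno value is a stand-in for the <errno.h> constant):

#include <stdio.h>

#define MAX_ERRNO 4095
#define ENOMEM_VAL 12 /* stand-in for ENOMEM */

static inline void *err_ptr(long err) /* like ERR_PTR() */
{
	return (void *)err;
}

/* like IS_ERR(): error pointers live in the top MAX_ERRNO addresses */
static inline int is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	/* Positive errno: the pointer 0xc is NOT in the error range. */
	printf("ERR_PTR(ENOMEM):  is_err=%d\n", is_err(err_ptr(ENOMEM_VAL)));
	/* Negative errno: correctly detected as an error pointer. */
	printf("ERR_PTR(-ENOMEM): is_err=%d\n", is_err(err_ptr(-ENOMEM_VAL)));
	return 0;
}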
@ -437,7 +437,7 @@ static void lowpan_setup(struct net_device *dev)
/* Frame Control + Sequence Number + Address fields + Security Header */
dev->hard_header_len = 2 + 1 + 20 + 14;
dev->needed_tailroom = 2; /* FCS */
dev->mtu = 1281;
dev->mtu = IPV6_MIN_MTU;
dev->tx_queue_len = 0;
dev->flags = IFF_BROADCAST | IFF_MULTICAST;
dev->watchdog_timeo = 0;

@ -355,8 +355,6 @@ int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
struct net *net = dev_net(skb->dev);
struct lowpan_frag_info *frag_info = lowpan_cb(skb);
struct ieee802154_addr source, dest;
struct netns_ieee802154_lowpan *ieee802154_lowpan =
net_ieee802154_lowpan(net);
int err;

source = mac_cb(skb)->source;

@ -366,8 +364,10 @@ int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
if (err < 0)
goto err;

if (frag_info->d_size > ieee802154_lowpan->max_dsize)
if (frag_info->d_size > IPV6_MIN_MTU) {
net_warn_ratelimited("lowpan_frag_rcv: datagram size exceeds MTU\n");
goto err;
}

fq = fq_find(net, frag_info, &source, &dest);
if (fq != NULL) {

@ -415,13 +415,6 @@ static struct ctl_table lowpan_frags_ns_ctl_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "6lowpanfrag_max_datagram_size",
.data = &init_net.ieee802154_lowpan.max_dsize,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
{ }
};

@ -458,7 +451,6 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
table[1].data = &ieee802154_lowpan->frags.low_thresh;
table[1].extra2 = &ieee802154_lowpan->frags.high_thresh;
table[2].data = &ieee802154_lowpan->frags.timeout;
table[3].data = &ieee802154_lowpan->max_dsize;

/* Don't export sysctls to unprivileged users */
if (net->user_ns != &init_user_ns)

@ -533,7 +525,6 @@ static int __net_init lowpan_frags_init_net(struct net *net)
ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
ieee802154_lowpan->max_dsize = 0xFFFF;

inet_frags_init_net(&ieee802154_lowpan->frags);
@ -82,6 +82,52 @@ config NF_TABLES_ARP
help
This option enables the ARP support for nf_tables.

config NF_NAT_IPV4
tristate "IPv4 NAT"
depends on NF_CONNTRACK_IPV4
default m if NETFILTER_ADVANCED=n
select NF_NAT
help
The IPv4 NAT option allows masquerading, port forwarding and other
forms of full Network Address Port Translation. This can be
controlled by iptables or nft.

if NF_NAT_IPV4

config NF_NAT_SNMP_BASIC
tristate "Basic SNMP-ALG support"
depends on NF_CONNTRACK_SNMP
depends on NETFILTER_ADVANCED
default NF_NAT && NF_CONNTRACK_SNMP
---help---
This module implements an Application Layer Gateway (ALG) for
SNMP payloads. In conjunction with NAT, it allows a network
management system to access multiple private networks with
conflicting addresses. It works by modifying IP addresses
inside SNMP payloads to match IP-layer NAT mapping.

This is the "basic" form of SNMP-ALG, as described in RFC 2962

To compile it as a module, choose M here. If unsure, say N.

config NF_NAT_PROTO_GRE
tristate
depends on NF_CT_PROTO_GRE

config NF_NAT_PPTP
tristate
depends on NF_CONNTRACK
default NF_CONNTRACK_PPTP
select NF_NAT_PROTO_GRE

config NF_NAT_H323
tristate
depends on NF_CONNTRACK
default NF_CONNTRACK_H323

endif # NF_NAT_IPV4

config IP_NF_IPTABLES
tristate "IP tables support (required for filtering/masq/NAT)"
default m if NETFILTER_ADVANCED=n
@ -170,19 +216,21 @@ config IP_NF_TARGET_SYNPROXY
To compile it as a module, choose M here. If unsure, say N.

# NAT + specific targets: nf_conntrack
config NF_NAT_IPV4
tristate "IPv4 NAT"
config IP_NF_NAT
tristate "iptables NAT support"
depends on NF_CONNTRACK_IPV4
default m if NETFILTER_ADVANCED=n
select NF_NAT
select NF_NAT_IPV4
select NETFILTER_XT_NAT
help
The IPv4 NAT option allows masquerading, port forwarding and other
forms of full Network Address Port Translation. It is controlled by
the `nat' table in iptables: see the man page for iptables(8).
This enables the `nat' table in iptables. This allows masquerading,
port forwarding and other forms of full Network Address Port
Translation.

To compile it as a module, choose M here. If unsure, say N.

if NF_NAT_IPV4
if IP_NF_NAT

config IP_NF_TARGET_MASQUERADE
tristate "MASQUERADE target support"

@ -214,47 +262,7 @@ config IP_NF_TARGET_REDIRECT
(e.g. when running oldconfig). It selects
CONFIG_NETFILTER_XT_TARGET_REDIRECT.

endif

config NF_NAT_SNMP_BASIC
tristate "Basic SNMP-ALG support"
depends on NF_CONNTRACK_SNMP && NF_NAT_IPV4
depends on NETFILTER_ADVANCED
default NF_NAT && NF_CONNTRACK_SNMP
---help---
This module implements an Application Layer Gateway (ALG) for
SNMP payloads. In conjunction with NAT, it allows a network
management system to access multiple private networks with
conflicting addresses. It works by modifying IP addresses
inside SNMP payloads to match IP-layer NAT mapping.

This is the "basic" form of SNMP-ALG, as described in RFC 2962

To compile it as a module, choose M here. If unsure, say N.

# If they want FTP, set to $CONFIG_IP_NF_NAT (m or y),
# or $CONFIG_IP_NF_FTP (m or y), whichever is weaker.
# From kconfig-language.txt:
#
# <expr> '&&' <expr> (6)
#
# (6) Returns the result of min(/expr/, /expr/).

config NF_NAT_PROTO_GRE
tristate
depends on NF_NAT_IPV4 && NF_CT_PROTO_GRE

config NF_NAT_PPTP
tristate
depends on NF_CONNTRACK && NF_NAT_IPV4
default NF_NAT_IPV4 && NF_CONNTRACK_PPTP
select NF_NAT_PROTO_GRE

config NF_NAT_H323
tristate
depends on NF_CONNTRACK && NF_NAT_IPV4
default NF_NAT_IPV4 && NF_CONNTRACK_H323
endif # IP_NF_NAT

# mangle + specific targets
config IP_NF_MANGLE
@ -43,7 +43,7 @@ obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
# the three instances of ip_tables
obj-$(CONFIG_IP_NF_FILTER) += iptable_filter.o
obj-$(CONFIG_IP_NF_MANGLE) += iptable_mangle.o
obj-$(CONFIG_NF_NAT_IPV4) += iptable_nat.o
obj-$(CONFIG_IP_NF_NAT) += iptable_nat.o
obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o
obj-$(CONFIG_IP_NF_SECURITY) += iptable_security.o
@ -1690,14 +1690,12 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
addrconf_mod_dad_work(ifp, 0);
}

/* Join to solicited addr multicast group. */
/* Join to solicited addr multicast group.
* caller must hold RTNL */
void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
{
struct in6_addr maddr;

ASSERT_RTNL();

if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
return;

@ -1705,12 +1703,11 @@ void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
ipv6_dev_mc_inc(dev, &maddr);
}

/* caller must hold RTNL */
void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
{
struct in6_addr maddr;

ASSERT_RTNL();

if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
return;

@ -1718,12 +1715,11 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
__ipv6_dev_mc_dec(idev, &maddr);
}

/* caller must hold RTNL */
static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
{
struct in6_addr addr;

ASSERT_RTNL();

if (ifp->prefix_len >= 127) /* RFC 6164 */
return;
ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);

@ -1732,12 +1728,11 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
ipv6_dev_ac_inc(ifp->idev->dev, &addr);
}

/* caller must hold RTNL */
static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
{
struct in6_addr addr;

ASSERT_RTNL();

if (ifp->prefix_len >= 127) /* RFC 6164 */
return;
ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);

@ -4773,15 +4768,11 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
addrconf_leave_solict(ifp->idev, &ifp->addr);
if (!ipv6_addr_any(&ifp->peer_addr)) {
struct rt6_info *rt;
struct net_device *dev = ifp->idev->dev;

rt = rt6_lookup(dev_net(dev), &ifp->peer_addr, NULL,
dev->ifindex, 1);
if (rt) {
dst_hold(&rt->dst);
if (ip6_del_rt(rt))
dst_free(&rt->dst);
}
rt = addrconf_get_prefix_route(&ifp->peer_addr, 128,
ifp->idev->dev, 0, 0);
if (rt && ip6_del_rt(rt))
dst_free(&rt->dst);
}
dst_hold(&ifp->rt->dst);
@ -77,6 +77,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
pac->acl_next = NULL;
pac->acl_addr = *addr;

rtnl_lock();
rcu_read_lock();
if (ifindex == 0) {
struct rt6_info *rt;

@ -137,6 +138,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)

error:
rcu_read_unlock();
rtnl_unlock();
if (pac)
sock_kfree_s(sk, pac, sizeof(*pac));
return err;

@ -171,11 +173,13 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)

spin_unlock_bh(&ipv6_sk_ac_lock);

rtnl_lock();
rcu_read_lock();
dev = dev_get_by_index_rcu(net, pac->acl_ifindex);
if (dev)
ipv6_dev_ac_dec(dev, &pac->acl_addr);
rcu_read_unlock();
rtnl_unlock();

sock_kfree_s(sk, pac, sizeof(*pac));
return 0;

@ -198,6 +202,7 @@ void ipv6_sock_ac_close(struct sock *sk)
spin_unlock_bh(&ipv6_sk_ac_lock);

prev_index = 0;
rtnl_lock();
rcu_read_lock();
while (pac) {
struct ipv6_ac_socklist *next = pac->acl_next;

@ -212,6 +217,7 @@ void ipv6_sock_ac_close(struct sock *sk)
pac = next;
}
rcu_read_unlock();
rtnl_unlock();
}

static void aca_put(struct ifacaddr6 *ac)

@ -233,6 +239,8 @@ int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr)
struct rt6_info *rt;
int err;

ASSERT_RTNL();

idev = in6_dev_get(dev);

if (idev == NULL)

@ -302,6 +310,8 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr)
{
struct ifacaddr6 *aca, *prev_aca;

ASSERT_RTNL();

write_lock_bh(&idev->lock);
prev_aca = NULL;
for (aca = idev->ac_list; aca; aca = aca->aca_next) {
@ -172,6 +172,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
mc_lst->next = NULL;
mc_lst->addr = *addr;

rtnl_lock();
rcu_read_lock();
if (ifindex == 0) {
struct rt6_info *rt;

@ -185,6 +186,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)

if (dev == NULL) {
rcu_read_unlock();
rtnl_unlock();
sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
return -ENODEV;
}

@ -202,6 +204,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)

if (err) {
rcu_read_unlock();
rtnl_unlock();
sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
return err;
}

@ -212,6 +215,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
spin_unlock(&ipv6_sk_mc_lock);

rcu_read_unlock();
rtnl_unlock();

return 0;
}

@ -229,6 +233,7 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
if (!ipv6_addr_is_multicast(addr))
return -EINVAL;

rtnl_lock();
spin_lock(&ipv6_sk_mc_lock);
for (lnk = &np->ipv6_mc_list;
(mc_lst = rcu_dereference_protected(*lnk,

@ -252,12 +257,15 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
} else
(void) ip6_mc_leave_src(sk, mc_lst, NULL);
rcu_read_unlock();
rtnl_unlock();

atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
kfree_rcu(mc_lst, rcu);
return 0;
}
}
spin_unlock(&ipv6_sk_mc_lock);
rtnl_unlock();

return -EADDRNOTAVAIL;
}

@ -302,6 +310,7 @@ void ipv6_sock_mc_close(struct sock *sk)
if (!rcu_access_pointer(np->ipv6_mc_list))
return;

rtnl_lock();
spin_lock(&ipv6_sk_mc_lock);
while ((mc_lst = rcu_dereference_protected(np->ipv6_mc_list,
lockdep_is_held(&ipv6_sk_mc_lock))) != NULL) {

@ -328,6 +337,7 @@ void ipv6_sock_mc_close(struct sock *sk)
spin_lock(&ipv6_sk_mc_lock);
}
spin_unlock(&ipv6_sk_mc_lock);
rtnl_unlock();
}

int ip6_mc_source(int add, int omode, struct sock *sk,

@ -845,6 +855,8 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
struct ifmcaddr6 *mc;
struct inet6_dev *idev;

ASSERT_RTNL();

/* we need to take a reference on idev */
idev = in6_dev_get(dev);

@ -916,6 +928,8 @@ int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
{
struct ifmcaddr6 *ma, **map;

ASSERT_RTNL();

write_lock_bh(&idev->lock);
for (map = &idev->mc_list; (ma=*map) != NULL; map = &ma->next) {
if (ipv6_addr_equal(&ma->mca_addr, addr)) {
@ -57,9 +57,19 @@ config NFT_REJECT_IPV6

config NF_LOG_IPV6
tristate "IPv6 packet logging"
depends on NETFILTER_ADVANCED
default m if NETFILTER_ADVANCED=n
select NF_LOG_COMMON

config NF_NAT_IPV6
tristate "IPv6 NAT"
depends on NF_CONNTRACK_IPV6
depends on NETFILTER_ADVANCED
select NF_NAT
help
The IPv6 NAT option allows masquerading, port forwarding and other
forms of full Network Address Port Translation. This can be
controlled by iptables or nft.

config IP6_NF_IPTABLES
tristate "IP6 tables support (required for filtering)"
depends on INET && IPV6

@ -232,19 +242,21 @@ config IP6_NF_SECURITY

If unsure, say N.

config NF_NAT_IPV6
tristate "IPv6 NAT"
config IP6_NF_NAT
tristate "ip6tables NAT support"
depends on NF_CONNTRACK_IPV6
depends on NETFILTER_ADVANCED
select NF_NAT
select NF_NAT_IPV6
select NETFILTER_XT_NAT
help
The IPv6 NAT option allows masquerading, port forwarding and other
forms of full Network Address Port Translation. It is controlled by
the `nat' table in ip6tables, see the man page for ip6tables(8).
This enables the `nat' table in ip6tables. This allows masquerading,
port forwarding and other forms of full Network Address Port
Translation.

To compile it as a module, choose M here. If unsure, say N.

if NF_NAT_IPV6
if IP6_NF_NAT

config IP6_NF_TARGET_MASQUERADE
tristate "MASQUERADE target support"

@ -265,7 +277,7 @@ config IP6_NF_TARGET_NPT

To compile it as a module, choose M here. If unsure, say N.

endif # NF_NAT_IPV6
endif # IP6_NF_NAT

endif # IP6_NF_IPTABLES

@ -8,7 +8,7 @@ obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o
obj-$(CONFIG_NF_NAT_IPV6) += ip6table_nat.o
obj-$(CONFIG_IP6_NF_NAT) += ip6table_nat.o

# objects for l3 independent conntrack
nf_conntrack_ipv6-y := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o
@ -755,7 +755,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
/* If PMTU discovery was enabled, use the MTU that was discovered */
dst = sk_dst_get(tunnel->sock);
if (dst != NULL) {
u32 pmtu = dst_mtu(__sk_dst_get(tunnel->sock));
u32 pmtu = dst_mtu(dst);

if (pmtu != 0)
session->mtu = session->mru = pmtu -
PPPOL2TP_HEADER_OVERHEAD;
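The l2tp change above avoids dereferencing a second, unreferenced read of the socket's cached dst: sk_dst_get() returns a pointer with a reference held, and it is that pointer, not a fresh lockless re-read, that should be used and later released. A toy model of the safe pattern; the refcounting shim below is only an illustration, not the kernel API.

#include <stdio.h>

struct dst { int refcnt; int mtu; };

static struct dst *dst_get(struct dst *d) /* like sk_dst_get() */
{
	if (d)
		d->refcnt++;
	return d;
}

static void dst_put(struct dst *d)
{
	if (d)
		d->refcnt--;
}

int main(void)
{
	struct dst route = { .refcnt = 1, .mtu = 1400 };
	struct dst *cached = &route;

	/* Safe pattern: take a reference once, use that pointer, drop it.
	 * Re-reading the cached pointer without a reference (the old code)
	 * could see it swapped or freed by a concurrent route update. */
	struct dst *d = dst_get(cached);
	if (d) {
		printf("pmtu=%d\n", d->mtu);
		dst_put(d);
	}
	return 0;
}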
@ -541,6 +541,8 @@ static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
continue;
if (rcu_access_pointer(sdata->vif.chanctx_conf) != conf)
continue;
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
continue;

if (!compat)
compat = &sdata->vif.bss_conf.chandef;
Some files were not shown because too many files have changed in this diff