Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (56 commits)
  route: Take the right src and dst addresses in ip_route_newports
  ipv4: Fix nexthop caching wrt. scoping.
  ipv4: Invalidate nexthop cache nh_saddr more correctly.
  net: fix pch_gbe section mismatch warning
  ipv4: fix fib metrics
  mlx4_en: Removing HW info from ethtool -i report.
  net_sched: fix THROTTLED/RUNNING race
  drivers/net/a2065.c: Convert release_resource to release_region/release_mem_region
  drivers/net/ariadne.c: Convert release_resource to release_region/release_mem_region
  bonding: fix rx_handler locking
  myri10ge: fix rmmod crash
  mlx4_en: updated driver version to 1.5.4.1
  mlx4_en: Using blue flame support
  mlx4_core: reserve UARs for userspace consumers
  mlx4_core: maintain available field in bitmap allocator
  mlx4: Add blue flame support for kernel consumers
  mlx4_en: Enabling new steering
  mlx4: Add support for promiscuous mode in the new steering model.
  mlx4: generalization of multicast steering.
  mlx4_en: Reporting HW revision in ethtool -i
  ...
commit 00a2470546
@@ -625,7 +625,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
 				    !!(mqp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
-				    MLX4_PROTOCOL_IB);
+				    MLX4_PROT_IB_IPV6);
 	if (err)
 		return err;
 
@@ -636,7 +636,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	return 0;
 
 err_add:
-	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, MLX4_PROTOCOL_IB);
+	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, MLX4_PROT_IB_IPV6);
 	return err;
 }
 
@@ -666,7 +666,7 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	struct mlx4_ib_gid_entry *ge;
 
 	err = mlx4_multicast_detach(mdev->dev,
-				    &mqp->mqp, gid->raw, MLX4_PROTOCOL_IB);
+				    &mqp->mqp, gid->raw, MLX4_PROT_IB_IPV6);
 	if (err)
 		return err;
 
@@ -721,7 +721,6 @@ static int init_node_data(struct mlx4_ib_dev *dev)
 	if (err)
 		goto out;
 
-	dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
 	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
 
 out:
@@ -954,7 +953,7 @@ static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event
 	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
 		oldnd = iboe->netdevs[port - 1];
 		iboe->netdevs[port - 1] =
-			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROTOCOL_EN, port);
+			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
 		if (oldnd != iboe->netdevs[port - 1]) {
 			if (iboe->netdevs[port - 1])
 				netdev_added(ibdev, port);
@@ -1207,7 +1206,7 @@ static struct mlx4_interface mlx4_ib_interface = {
 	.add		= mlx4_ib_add,
 	.remove		= mlx4_ib_remove,
 	.event		= mlx4_ib_event,
-	.protocol	= MLX4_PROTOCOL_IB
+	.protocol	= MLX4_PROT_IB_IPV6
 };
 
 static int __init mlx4_ib_init(void)
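The six hunks above all follow from the multicast-steering generalization named in the shortlog ("mlx4: generalization of multicast steering."): attach/detach and interface registration now take a member of a broader protocol enum instead of the old MLX4_PROTOCOL_* constants. A minimal sketch of the shape of that enum; only MLX4_PROT_IB_IPV6 and MLX4_PROT_ETH are confirmed by this diff, and the remaining member and the helper are illustrative assumptions:

	/* Sketch only: one protocol tag lets a single steering entry point
	 * serve IB and Ethernet consumers. The IPv4 member and the helper
	 * below are assumptions, not taken from this diff. */
	enum mlx4_protocol {
		MLX4_PROT_IB_IPV6 = 0,
		MLX4_PROT_ETH,
		MLX4_PROT_IB_IPV4,	/* assumed additional member */
	};

	static const char *mlx4_prot_name(enum mlx4_protocol prot)
	{
		return prot == MLX4_PROT_ETH ? "ethernet" : "infiniband";
	}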
@@ -711,14 +711,14 @@ static int __devinit a2065_init_one(struct zorro_dev *z,
 		return -EBUSY;
 	r2 = request_mem_region(mem_start, A2065_RAM_SIZE, "RAM");
 	if (!r2) {
-		release_resource(r1);
+		release_mem_region(base_addr, sizeof(struct lance_regs));
 		return -EBUSY;
 	}
 
 	dev = alloc_etherdev(sizeof(struct lance_private));
 	if (dev == NULL) {
-		release_resource(r1);
-		release_resource(r2);
+		release_mem_region(base_addr, sizeof(struct lance_regs));
+		release_mem_region(mem_start, A2065_RAM_SIZE);
 		return -ENOMEM;
 	}
 
@@ -764,8 +764,8 @@ static int __devinit a2065_init_one(struct zorro_dev *z,
 
 	err = register_netdev(dev);
 	if (err) {
-		release_resource(r1);
-		release_resource(r2);
+		release_mem_region(base_addr, sizeof(struct lance_regs));
+		release_mem_region(mem_start, A2065_RAM_SIZE);
 		free_netdev(dev);
 		return err;
 	}
@@ -182,14 +182,14 @@ static int __devinit ariadne_init_one(struct zorro_dev *z,
 		return -EBUSY;
 	r2 = request_mem_region(mem_start, ARIADNE_RAM_SIZE, "RAM");
 	if (!r2) {
-		release_resource(r1);
+		release_mem_region(base_addr, sizeof(struct Am79C960));
 		return -EBUSY;
 	}
 
 	dev = alloc_etherdev(sizeof(struct ariadne_private));
 	if (dev == NULL) {
-		release_resource(r1);
-		release_resource(r2);
+		release_mem_region(base_addr, sizeof(struct Am79C960));
+		release_mem_region(mem_start, ARIADNE_RAM_SIZE);
 		return -ENOMEM;
 	}
 
@@ -213,8 +213,8 @@ static int __devinit ariadne_init_one(struct zorro_dev *z,
 
 	err = register_netdev(dev);
 	if (err) {
-		release_resource(r1);
-		release_resource(r2);
+		release_mem_region(base_addr, sizeof(struct Am79C960));
+		release_mem_region(mem_start, ARIADNE_RAM_SIZE);
 		free_netdev(dev);
 		return err;
 	}
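Both Zorro probe functions used to call release_resource() on the struct resource returned by request_mem_region(); the conversion releases by base address and length instead, which both frees the region and mirrors how it was claimed. A small sketch of the pairing, assuming a two-region layout like the drivers above (the function itself is illustrative, not from the diff):

	#include <linux/ioport.h>

	/* Sketch: claim two MMIO regions and unwind with release_mem_region()
	 * rather than release_resource() on the returned pointer. */
	static int claim_regions(unsigned long regs, unsigned long regs_len,
				 unsigned long ram, unsigned long ram_len)
	{
		if (!request_mem_region(regs, regs_len, "Lance"))
			return -EBUSY;
		if (!request_mem_region(ram, ram_len, "RAM")) {
			release_mem_region(regs, regs_len); /* undo first claim */
			return -EBUSY;
		}
		return 0;
	}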
@@ -1482,21 +1482,16 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
 {
 	struct sk_buff *skb = *pskb;
 	struct slave *slave;
-	struct net_device *bond_dev;
 	struct bonding *bond;
 
-	slave = bond_slave_get_rcu(skb->dev);
-	bond_dev = ACCESS_ONCE(slave->dev->master);
-	if (unlikely(!bond_dev))
-		return RX_HANDLER_PASS;
-
 	skb = skb_share_check(skb, GFP_ATOMIC);
 	if (unlikely(!skb))
 		return RX_HANDLER_CONSUMED;
 
 	*pskb = skb;
 
-	bond = netdev_priv(bond_dev);
+	slave = bond_slave_get_rcu(skb->dev);
+	bond = slave->bond;
 
 	if (bond->params.arp_interval)
 		slave->dev->last_rx = jiffies;
@@ -1505,10 +1500,10 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
 		return RX_HANDLER_EXACT;
 	}
 
-	skb->dev = bond_dev;
+	skb->dev = bond->dev;
 
 	if (bond->params.mode == BOND_MODE_ALB &&
-	    bond_dev->priv_flags & IFF_BRIDGE_PORT &&
+	    bond->dev->priv_flags & IFF_BRIDGE_PORT &&
 	    skb->pkt_type == PACKET_HOST) {
 
 		if (unlikely(skb_cow_head(skb,
@@ -1516,7 +1511,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
 			kfree_skb(skb);
 			return RX_HANDLER_CONSUMED;
 		}
-		memcpy(eth_hdr(skb)->h_dest, bond_dev->dev_addr, ETH_ALEN);
+		memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN);
 	}
 
 	return RX_HANDLER_ANOTHER;
@@ -1698,20 +1693,15 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 		pr_debug("Error %d calling netdev_set_bond_master\n", res);
 		goto err_restore_mac;
 	}
-	res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
-					 new_slave);
-	if (res) {
-		pr_debug("Error %d calling netdev_rx_handler_register\n", res);
-		goto err_unset_master;
-	}
 
 	/* open the slave since the application closed it */
 	res = dev_open(slave_dev);
 	if (res) {
 		pr_debug("Opening slave %s failed\n", slave_dev->name);
-		goto err_unreg_rxhandler;
+		goto err_unset_master;
 	}
 
+	new_slave->bond = bond;
 	new_slave->dev = slave_dev;
 	slave_dev->priv_flags |= IFF_BONDING;
 
@@ -1907,6 +1897,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 	if (res)
 		goto err_close;
 
+	res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
+					 new_slave);
+	if (res) {
+		pr_debug("Error %d calling netdev_rx_handler_register\n", res);
+		goto err_dest_symlinks;
+	}
+
 	pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
 		bond_dev->name, slave_dev->name,
 		bond_is_active_slave(new_slave) ? "n active" : " backup",
@@ -1916,13 +1913,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 	return 0;
 
 /* Undo stages on error */
+err_dest_symlinks:
+	bond_destroy_slave_symlinks(bond_dev, slave_dev);
+
 err_close:
 	dev_close(slave_dev);
 
-err_unreg_rxhandler:
-	netdev_rx_handler_unregister(slave_dev);
-	synchronize_net();
-
 err_unset_master:
 	netdev_set_bond_master(slave_dev, NULL);
 
@@ -1988,6 +1984,14 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
 		return -EINVAL;
 	}
 
+	/* unregister rx_handler early so bond_handle_frame wouldn't be called
+	 * for this slave anymore.
+	 */
+	netdev_rx_handler_unregister(slave_dev);
+	write_unlock_bh(&bond->lock);
+	synchronize_net();
+	write_lock_bh(&bond->lock);
+
 	if (!bond->params.fail_over_mac) {
 		if (!compare_ether_addr(bond_dev->dev_addr, slave->perm_hwaddr) &&
 		    bond->slave_cnt > 1)
@@ -2104,8 +2108,6 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
 		netif_addr_unlock_bh(bond_dev);
 	}
 
-	netdev_rx_handler_unregister(slave_dev);
-	synchronize_net();
 	netdev_set_bond_master(slave_dev, NULL);
 
 	slave_disable_netpoll(slave);
@@ -2186,6 +2188,12 @@ static int bond_release_all(struct net_device *bond_dev)
 		 */
 		write_unlock_bh(&bond->lock);
 
+		/* unregister rx_handler early so bond_handle_frame wouldn't
+		 * be called for this slave anymore.
+		 */
+		netdev_rx_handler_unregister(slave_dev);
+		synchronize_net();
+
 		if (bond_is_lb(bond)) {
 			/* must be called only after the slave
 			 * has been detached from the list
@@ -2217,8 +2225,6 @@ static int bond_release_all(struct net_device *bond_dev)
 			netif_addr_unlock_bh(bond_dev);
 		}
 
-		netdev_rx_handler_unregister(slave_dev);
-		synchronize_net();
 		netdev_set_bond_master(slave_dev, NULL);
 
 		slave_disable_netpoll(slave);
@@ -187,6 +187,7 @@ struct slave {
 	struct net_device *dev; /* first - useful for panic debug */
 	struct slave *next;
 	struct slave *prev;
+	struct bonding *bond; /* our master */
 	int    delay;
 	unsigned long jiffies;
 	unsigned long last_arp_rx;
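Taken together, the bonding hunks enforce one lifetime rule: register the rx_handler only after the slave is fully set up, and unregister it first on release, running synchronize_net() with bond->lock dropped so no CPU can still be inside bond_handle_frame() when teardown proceeds; the handler then reaches the bonding struct through the new slave->bond back-pointer instead of dereferencing slave->dev->master. A condensed sketch of that rule (simplified; locking details and error unwinding are elided, so this is not the driver's full logic):

	/* Sketch of the ordering the fix establishes. */
	static int enslave(struct net_device *slave_dev, struct bonding *bond,
			   struct slave *new_slave)
	{
		new_slave->bond = bond;	/* back-pointer used in the RX hot path */
		/* ... all other enslave setup completes first ... */
		return netdev_rx_handler_register(slave_dev, bond_handle_frame,
						  new_slave);	/* last step */
	}

	static void release(struct net_device *slave_dev)
	{
		netdev_rx_handler_unregister(slave_dev);	/* first step */
		synchronize_net();	/* wait out in-flight bond_handle_frame() */
		/* ... now safe to tear the slave down ... */
	}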
@@ -76,6 +76,7 @@ struct cpdma_desc {
 
 struct cpdma_desc_pool {
 	u32			phys;
+	u32			hw_addr;
 	void __iomem		*iomap;		/* ioremap map */
 	void			*cpumap;	/* dma_alloc map */
 	int			desc_size, mem_size;
@@ -137,7 +138,8 @@ struct cpdma_chan {
  * abstract out these details
  */
 static struct cpdma_desc_pool *
-cpdma_desc_pool_create(struct device *dev, u32 phys, int size, int align)
+cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
+		       int size, int align)
 {
 	int bitmap_size;
 	struct cpdma_desc_pool *pool;
@@ -161,10 +163,12 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, int size, int align)
 	if (phys) {
 		pool->phys  = phys;
 		pool->iomap = ioremap(phys, size);
+		pool->hw_addr = hw_addr;
 	} else {
 		pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys,
 						  GFP_KERNEL);
 		pool->iomap = (void __force __iomem *)pool->cpumap;
+		pool->hw_addr = pool->phys;
 	}
 
 	if (pool->iomap)
@@ -201,14 +205,14 @@ static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
 {
 	if (!desc)
 		return 0;
-	return pool->phys + (__force dma_addr_t)desc -
+	return pool->hw_addr + (__force dma_addr_t)desc -
 			    (__force dma_addr_t)pool->iomap;
 }
 
 static inline struct cpdma_desc __iomem *
 desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
 {
-	return dma ? pool->iomap + dma - pool->phys : NULL;
+	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
 }
 
 static struct cpdma_desc __iomem *
@@ -260,6 +264,7 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
 
 	ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
 					    ctlr->params.desc_mem_phys,
+					    ctlr->params.desc_hw_addr,
 					    ctlr->params.desc_mem_size,
 					    ctlr->params.desc_align);
 	if (!ctlr->pool) {
@@ -33,6 +33,7 @@ struct cpdma_params {
 	bool			has_soft_reset;
 	int			min_packet_size;
 	u32			desc_mem_phys;
+	u32			desc_hw_addr;
 	int			desc_mem_size;
 	int			desc_align;
 
@@ -1854,10 +1854,13 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
 	dma_params.rxcp			= priv->emac_base + 0x660;
 	dma_params.num_chan		= EMAC_MAX_TXRX_CHANNELS;
 	dma_params.min_packet_size	= EMAC_DEF_MIN_ETHPKTSIZE;
-	dma_params.desc_mem_phys	= hw_ram_addr;
+	dma_params.desc_hw_addr		= hw_ram_addr;
 	dma_params.desc_mem_size	= pdata->ctrl_ram_size;
 	dma_params.desc_align		= 16;
 
+	dma_params.desc_mem_phys = pdata->no_bd_ram ? 0 :
+		(u32 __force)res->start + pdata->ctrl_ram_offset;
+
 	priv->dma = cpdma_ctlr_create(&dma_params);
 	if (!priv->dma) {
 		dev_err(emac_dev, "DaVinci EMAC: Error initializing DMA\n");
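The cpdma change splits the descriptor pool address into two views: phys is what the CPU ioremap()s, hw_addr is what the DMA engine is told, and on DaVinci EMAC the two can differ (hw_ram_addr versus the resource-relative CPU address). desc_phys()/desc_from_phys() now translate through hw_addr. A standalone, runnable sketch of the two-view translation; the addresses are made up:

	#include <stdint.h>
	#include <stdio.h>

	/* One buffer, two address spaces: cpu_base stands in for the
	 * ioremap()ed mapping, hw_base for the address the DMA engine uses. */
	struct pool_view {
		uintptr_t cpu_base;
		uint32_t  hw_base;
	};

	static uint32_t desc_to_hw(const struct pool_view *p, uintptr_t desc)
	{
		return p->hw_base + (uint32_t)(desc - p->cpu_base);
	}

	static uintptr_t hw_to_desc(const struct pool_view *p, uint32_t hw)
	{
		return p->cpu_base + (hw - p->hw_base);
	}

	int main(void)
	{
		struct pool_view p = { .cpu_base = 0x10000, .hw_base = 0x01E20000 };
		uint32_t hw = desc_to_hw(&p, 0x10040);
		printf("hw=%#x back=%#lx\n", hw, (unsigned long)hw_to_desc(&p, hw));
		return 0;
	}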
@@ -62,6 +62,9 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
 	} else
 		obj = -1;
 
+	if (obj != -1)
+		--bitmap->avail;
+
 	spin_unlock(&bitmap->lock);
 
 	return obj;
@@ -101,11 +104,19 @@ u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
 	} else
 		obj = -1;
 
+	if (obj != -1)
+		bitmap->avail -= cnt;
+
 	spin_unlock(&bitmap->lock);
 
 	return obj;
 }
 
+u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
+{
+	return bitmap->avail;
+}
+
 void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
 {
 	obj &= bitmap->max + bitmap->reserved_top - 1;
@@ -115,6 +126,7 @@ void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
 	bitmap->last = min(bitmap->last, obj);
 	bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
 			& bitmap->mask;
+	bitmap->avail += cnt;
 	spin_unlock(&bitmap->lock);
 }
 
@@ -130,6 +142,7 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
 	bitmap->max  = num - reserved_top;
 	bitmap->mask = mask;
 	bitmap->reserved_top = reserved_top;
+	bitmap->avail = num - reserved_top - reserved_bot;
 	spin_lock_init(&bitmap->lock);
 	bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
 				sizeof (long), GFP_KERNEL);
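These allocator hunks ("mlx4_core: maintain available field in bitmap allocator" in the shortlog) add an O(1) avail counter next to the bitmap so callers can ask how much is left without scanning: initialized to num minus both reserved ranges, decremented on every successful allocation, incremented by cnt on free. A compilable sketch of just that bookkeeping, with the struct reduced to the fields the counter needs:

	#include <pthread.h>

	struct bitmap_avail {
		pthread_mutex_t lock;
		unsigned int avail;
	};

	static void avail_init(struct bitmap_avail *b, unsigned int num,
			       unsigned int reserved_bot, unsigned int reserved_top)
	{
		pthread_mutex_init(&b->lock, NULL);
		b->avail = num - reserved_top - reserved_bot;
	}

	static void avail_take(struct bitmap_avail *b, unsigned int cnt)
	{
		pthread_mutex_lock(&b->lock);
		b->avail -= cnt;	/* after a successful allocation */
		pthread_mutex_unlock(&b->lock);
	}

	static void avail_put(struct bitmap_avail *b, unsigned int cnt)
	{
		pthread_mutex_lock(&b->lock);
		b->avail += cnt;	/* mirror of free_range */
		pthread_mutex_unlock(&b->lock);
	}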
@@ -198,7 +198,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
 	u64 mtt_addr;
 	int err;
 
-	if (vector >= dev->caps.num_comp_vectors)
+	if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool)
 		return -EINVAL;
 
 	cq->vector = vector;
@@ -51,13 +51,10 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
 	int err;
 
 	cq->size = entries;
-	if (mode == RX) {
+	if (mode == RX)
 		cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
-		cq->vector = ring % mdev->dev->caps.num_comp_vectors;
-	} else {
+	else
 		cq->buf_size = sizeof(struct mlx4_cqe);
-		cq->vector = 0;
-	}
 
 	cq->ring = ring;
 	cq->is_tx = mode;
@@ -80,7 +77,8 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
-	int err;
+	int err = 0;
+	char name[25];
 
 	cq->dev = mdev->pndev[priv->port];
 	cq->mcq.set_ci_db = cq->wqres.db.db;
@@ -89,6 +87,29 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 	*cq->mcq.arm_db = 0;
 	memset(cq->buf, 0, cq->buf_size);
 
+	if (cq->is_tx == RX) {
+		if (mdev->dev->caps.comp_pool) {
+			if (!cq->vector) {
+				sprintf(name , "%s-rx-%d", priv->dev->name, cq->ring);
+				if (mlx4_assign_eq(mdev->dev, name, &cq->vector)) {
+					cq->vector = (cq->ring + 1 + priv->port) %
+						mdev->dev->caps.num_comp_vectors;
+					mlx4_warn(mdev, "Failed Assigning an EQ to "
+						  "%s_rx-%d ,Falling back to legacy EQ's\n",
+						  priv->dev->name, cq->ring);
+				}
+			}
+		} else {
+			cq->vector = (cq->ring + 1 + priv->port) %
+				mdev->dev->caps.num_comp_vectors;
+		}
+	} else {
+		if (!cq->vector || !mdev->dev->caps.comp_pool) {
+			/*Fallback to legacy pool in case of error*/
+			cq->vector = 0;
+		}
+	}
+
 	if (!cq->is_tx)
 		cq->size = priv->rx_ring[cq->ring].actual_size;
 
@@ -112,12 +133,15 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 	return 0;
 }
 
-void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
+			bool reserve_vectors)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
 
 	mlx4_en_unmap_buffer(&cq->wqres.buf);
 	mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
+	if (priv->mdev->dev->caps.comp_pool && cq->vector && !reserve_vectors)
+		mlx4_release_eq(priv->mdev->dev, cq->vector);
 	cq->buf_size = 0;
 	cq->buf = NULL;
 }
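With the MSI-X pool (caps.comp_pool) available, each RX completion queue tries to claim a dedicated event queue through mlx4_assign_eq(), naming it after the netdev and ring; if that fails, or no pool exists, the vector falls back to a hash over the legacy completion vectors, and destroy releases the vector unless reserve_vectors asks to keep it across a resize. A reduced sketch of the selection logic; assign_eq() is a stand-in for the driver call, stubbed here so the fragment compiles:

	/* Stand-in for mlx4_assign_eq(); pretend the pool is exhausted. */
	static int assign_eq(int *vector) { (void)vector; return -1; }

	/* Sketch: pick a completion vector for an RX ring. */
	static int pick_rx_vector(int ring, int port, int comp_pool,
				  int num_comp_vectors, int *vector)
	{
		if (comp_pool && assign_eq(vector) == 0)
			return 0;	/* dedicated EQ claimed from the pool */
		/* legacy fallback: spread rings over the shared vectors */
		*vector = (ring + 1 + port) % num_comp_vectors;
		return 0;
	}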
@@ -45,7 +45,7 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
 
-	sprintf(drvinfo->driver, DRV_NAME " (%s)", mdev->dev->board_id);
+	strncpy(drvinfo->driver, DRV_NAME, 32);
 	strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32);
 	sprintf(drvinfo->fw_version, "%d.%d.%d",
 		(u16) (mdev->dev->caps.fw_ver >> 32),
@@ -131,8 +131,65 @@ static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
 static void mlx4_en_get_wol(struct net_device *netdev,
 			    struct ethtool_wolinfo *wol)
 {
-	wol->supported = 0;
-	wol->wolopts = 0;
+	struct mlx4_en_priv *priv = netdev_priv(netdev);
+	int err = 0;
+	u64 config = 0;
+
+	if (!priv->mdev->dev->caps.wol) {
+		wol->supported = 0;
+		wol->wolopts = 0;
+		return;
+	}
+
+	err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
+	if (err) {
+		en_err(priv, "Failed to get WoL information\n");
+		return;
+	}
+
+	if (config & MLX4_EN_WOL_MAGIC)
+		wol->supported = WAKE_MAGIC;
+	else
+		wol->supported = 0;
+
+	if (config & MLX4_EN_WOL_ENABLED)
+		wol->wolopts = WAKE_MAGIC;
+	else
+		wol->wolopts = 0;
+}
+
+static int mlx4_en_set_wol(struct net_device *netdev,
+			   struct ethtool_wolinfo *wol)
+{
+	struct mlx4_en_priv *priv = netdev_priv(netdev);
+	u64 config = 0;
+	int err = 0;
+
+	if (!priv->mdev->dev->caps.wol)
+		return -EOPNOTSUPP;
+
+	if (wol->supported & ~WAKE_MAGIC)
+		return -EINVAL;
+
+	err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
+	if (err) {
+		en_err(priv, "Failed to get WoL info, unable to modify\n");
+		return err;
+	}
+
+	if (wol->wolopts & WAKE_MAGIC) {
+		config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED |
+			MLX4_EN_WOL_MAGIC;
+	} else {
+		config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC);
+		config |= MLX4_EN_WOL_DO_MODIFY;
+	}
+
+	err = mlx4_wol_write(priv->mdev->dev, config, priv->port);
+	if (err)
+		en_err(priv, "Failed to set WoL information\n");
+
+	return err;
 }
 
 static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
@@ -388,7 +445,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
 		mlx4_en_stop_port(dev);
 	}
 
-	mlx4_en_free_resources(priv);
+	mlx4_en_free_resources(priv, true);
 
 	priv->prof->tx_ring_size = tx_size;
 	priv->prof->rx_ring_size = rx_size;
@@ -442,6 +499,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
 	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
 	.self_test = mlx4_en_self_test,
 	.get_wol = mlx4_en_get_wol,
+	.set_wol = mlx4_en_set_wol,
 	.get_msglevel = mlx4_en_get_msglevel,
 	.set_msglevel = mlx4_en_set_msglevel,
 	.get_coalesce = mlx4_en_get_coalesce,
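The new WoL handlers only speak magic-packet wake-up: get_wol reports WAKE_MAGIC from the firmware config word, and set_wol always sets the DO_MODIFY bit while setting or clearing ENABLED and MAGIC as a pair. A sketch of that update step; the bit positions below are placeholders, the real MLX4_EN_WOL_* values live in the driver headers:

	#include <stdint.h>

	#define WOL_MAGIC	(1ULL << 61)	/* placeholder positions */
	#define WOL_ENABLED	(1ULL << 62)
	#define WOL_DO_MODIFY	(1ULL << 63)

	static uint64_t wol_update(uint64_t config, int want_magic)
	{
		if (want_magic) {
			config |= WOL_DO_MODIFY | WOL_ENABLED | WOL_MAGIC;
		} else {
			config &= ~(WOL_ENABLED | WOL_MAGIC);
			config |= WOL_DO_MODIFY;	/* still request a write */
		}
		return config;
	}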
@@ -241,16 +241,18 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
 		mdev->port_cnt++;
 
-	/* If we did not receive an explicit number of Rx rings, default to
-	 * the number of completion vectors populated by the mlx4_core */
 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
-		mlx4_info(mdev, "Using %d tx rings for port:%d\n",
-			  mdev->profile.prof[i].tx_ring_num, i);
-		mdev->profile.prof[i].rx_ring_num = min_t(int,
-			roundup_pow_of_two(dev->caps.num_comp_vectors),
-			MAX_RX_RINGS);
-		mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n",
-			  mdev->profile.prof[i].rx_ring_num, i);
+		if (!dev->caps.comp_pool) {
+			mdev->profile.prof[i].rx_ring_num =
+				rounddown_pow_of_two(max_t(int, MIN_RX_RINGS,
+							   min_t(int,
+								 dev->caps.num_comp_vectors,
+								 MAX_RX_RINGS)));
+		} else {
+			mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two(
+				min_t(int, dev->caps.comp_pool/
+				      dev->caps.num_ports - 1 , MAX_MSIX_P_PORT - 1));
+		}
 	}
 
 	/* Create our own workqueue for reset/multicast tasks
@@ -294,7 +296,7 @@ static struct mlx4_interface mlx4_en_interface = {
 	.remove		= mlx4_en_remove,
 	.event		= mlx4_en_event,
 	.get_dev	= mlx4_en_get_netdev,
-	.protocol	= MLX4_PROTOCOL_EN,
+	.protocol	= MLX4_PROT_ETH,
 };
 
 static int __init mlx4_en_init(void)
@@ -156,9 +156,8 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
 	mutex_lock(&mdev->state_lock);
 	if (priv->port_up) {
 		/* Remove old MAC and insert the new one */
-		mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
-		err = mlx4_register_mac(mdev->dev, priv->port,
-					priv->mac, &priv->mac_index);
+		err = mlx4_replace_mac(mdev->dev, priv->port,
+				       priv->base_qpn, priv->mac, 0);
 		if (err)
 			en_err(priv, "Failed changing HW MAC address\n");
 	} else
@@ -214,6 +213,7 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct net_device *dev = priv->dev;
 	u64 mcast_addr = 0;
+	u8 mc_list[16] = {0};
 	int err;
 
 	mutex_lock(&mdev->state_lock);
@@ -239,8 +239,12 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 			priv->flags |= MLX4_EN_FLAG_PROMISC;
 
 			/* Enable promiscouos mode */
-			err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
-						     priv->base_qpn, 1);
+			if (!mdev->dev->caps.vep_uc_steering)
+				err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
+							     priv->base_qpn, 1);
+			else
+				err = mlx4_unicast_promisc_add(mdev->dev, priv->base_qpn,
+							       priv->port);
 			if (err)
 				en_err(priv, "Failed enabling "
 					     "promiscous mode\n");
@@ -252,10 +256,21 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 				en_err(priv, "Failed disabling "
 					     "multicast filter\n");
 
-			/* Disable port VLAN filter */
-			err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL);
-			if (err)
-				en_err(priv, "Failed disabling VLAN filter\n");
+			/* Add the default qp number as multicast promisc */
+			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
+				err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
+								 priv->port);
+				if (err)
+					en_err(priv, "Failed entering multicast promisc mode\n");
+				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
+			}
+
+			if (priv->vlgrp) {
+				/* Disable port VLAN filter */
+				err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL);
+				if (err)
+					en_err(priv, "Failed disabling VLAN filter\n");
+			}
 		}
 		goto out;
 	}
@@ -270,11 +285,24 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 		priv->flags &= ~MLX4_EN_FLAG_PROMISC;
 
 		/* Disable promiscouos mode */
-		err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
-					     priv->base_qpn, 0);
+		if (!mdev->dev->caps.vep_uc_steering)
+			err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
+						     priv->base_qpn, 0);
+		else
+			err = mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
+							  priv->port);
 		if (err)
 			en_err(priv, "Failed disabling promiscous mode\n");
 
+		/* Disable Multicast promisc */
+		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
+			err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
+							    priv->port);
+			if (err)
+				en_err(priv, "Failed disabling multicast promiscous mode\n");
+			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+		}
+
 		/* Enable port VLAN filter */
 		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
 		if (err)
@@ -287,14 +315,38 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 					  0, MLX4_MCAST_DISABLE);
 		if (err)
 			en_err(priv, "Failed disabling multicast filter\n");
+
+		/* Add the default qp number as multicast promisc */
+		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
+			err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
+							 priv->port);
+			if (err)
+				en_err(priv, "Failed entering multicast promisc mode\n");
+			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
+		}
 	} else {
 		int i;
+		/* Disable Multicast promisc */
+		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
+			err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
+							    priv->port);
+			if (err)
+				en_err(priv, "Failed disabling multicast promiscous mode\n");
+			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+		}
 
 		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
 					  0, MLX4_MCAST_DISABLE);
 		if (err)
 			en_err(priv, "Failed disabling multicast filter\n");
 
+		/* Detach our qp from all the multicast addresses */
+		for (i = 0; i < priv->mc_addrs_cnt; i++) {
+			memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
+			mc_list[5] = priv->port;
+			mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
+					      mc_list, MLX4_PROT_ETH);
+		}
 		/* Flush mcast filter and init it with broadcast address */
 		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
 				    1, MLX4_MCAST_CONFIG);
@@ -307,6 +359,10 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 		for (i = 0; i < priv->mc_addrs_cnt; i++) {
 			mcast_addr =
 			      mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN);
+			memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
+			mc_list[5] = priv->port;
+			mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp,
+					      mc_list, 0, MLX4_PROT_ETH);
 			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
 					    mcast_addr, 0, MLX4_MCAST_CONFIG);
 		}
@@ -314,8 +370,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 				  0, MLX4_MCAST_ENABLE);
 		if (err)
 			en_err(priv, "Failed enabling multicast filter\n");
-
-		mlx4_en_clear_list(dev);
 	}
 out:
 	mutex_unlock(&mdev->state_lock);
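In the Ethernet steering calls above, the 16-byte mc_list buffer stands in for the GID that the IB path passes: the multicast MAC occupies bytes 10..15 and the port number byte 5, which is how mlx4_multicast_attach()/detach() can address Ethernet groups through the same interface. A small runnable sketch of building such an entry (layout as used in the diff; the helper itself is illustrative):

	#include <stdint.h>
	#include <string.h>

	/* Build the pseudo-GID the Ethernet steering path uses:
	 * MAC in bytes 10..15, port in byte 5. */
	static void build_mc_gid(uint8_t gid[16], const uint8_t mac[6],
				 uint8_t port)
	{
		memset(gid, 0, 16);
		memcpy(&gid[10], mac, 6);
		gid[5] = port;
	}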
@@ -417,7 +471,6 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 	unsigned long avg_pkt_size;
 	unsigned long rx_packets;
 	unsigned long rx_bytes;
-	unsigned long rx_byte_diff;
 	unsigned long tx_packets;
 	unsigned long tx_pkt_diff;
 	unsigned long rx_pkt_diff;
@@ -441,25 +494,20 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 	rx_pkt_diff = ((unsigned long) (rx_packets -
 					priv->last_moder_packets));
 	packets = max(tx_pkt_diff, rx_pkt_diff);
-	rx_byte_diff = rx_bytes - priv->last_moder_bytes;
-	rx_byte_diff = rx_byte_diff ? rx_byte_diff : 1;
 	rate = packets * HZ / period;
 	avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
 				 priv->last_moder_bytes)) / packets : 0;
 
 	/* Apply auto-moderation only when packet rate exceeds a rate that
 	 * it matters */
-	if (rate > MLX4_EN_RX_RATE_THRESH) {
+	if (rate > MLX4_EN_RX_RATE_THRESH && avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
 		/* If tx and rx packet rates are not balanced, assume that
 		 * traffic is mainly BW bound and apply maximum moderation.
 		 * Otherwise, moderate according to packet rate */
-		if (2 * tx_pkt_diff > 3 * rx_pkt_diff &&
-		    rx_pkt_diff / rx_byte_diff <
-		    MLX4_EN_SMALL_PKT_SIZE)
-			moder_time = priv->rx_usecs_low;
-		else if (2 * rx_pkt_diff > 3 * tx_pkt_diff)
+		if (2 * tx_pkt_diff > 3 * rx_pkt_diff ||
+		    2 * rx_pkt_diff > 3 * tx_pkt_diff) {
 			moder_time = priv->rx_usecs_high;
-		else {
+		} else {
 			if (rate < priv->pkt_rate_low)
 				moder_time = priv->rx_usecs_low;
 			else if (rate > priv->pkt_rate_high)
@@ -471,9 +519,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 					priv->rx_usecs_low;
 		}
 	} else {
-		/* When packet rate is low, use default moderation rather than
-		 * 0 to prevent interrupt storms if traffic suddenly increases */
-		moder_time = priv->rx_usecs;
+		moder_time = priv->rx_usecs_low;
 	}
 
 	en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
@@ -565,6 +611,8 @@ int mlx4_en_start_port(struct net_device *dev)
 	int err = 0;
 	int i;
 	int j;
+	u8 mc_list[16] = {0};
+	char name[32];
 
 	if (priv->port_up) {
 		en_dbg(DRV, priv, "start port called while port already up\n");
@@ -603,16 +651,35 @@ int mlx4_en_start_port(struct net_device *dev)
 		++rx_index;
 	}
 
+	/* Set port mac number */
+	en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
+	err = mlx4_register_mac(mdev->dev, priv->port,
+				priv->mac, &priv->base_qpn, 0);
+	if (err) {
+		en_err(priv, "Failed setting port mac\n");
+		goto cq_err;
+	}
+	mdev->mac_removed[priv->port] = 0;
+
 	err = mlx4_en_config_rss_steer(priv);
 	if (err) {
 		en_err(priv, "Failed configuring rss steering\n");
-		goto cq_err;
+		goto mac_err;
 	}
 
+	if (mdev->dev->caps.comp_pool && !priv->tx_vector) {
+		sprintf(name , "%s-tx", priv->dev->name);
+		if (mlx4_assign_eq(mdev->dev , name, &priv->tx_vector)) {
+			mlx4_warn(mdev, "Failed Assigning an EQ to "
+				  "%s_tx ,Falling back to legacy "
+				  "EQ's\n", priv->dev->name);
+		}
+	}
 	/* Configure tx cq's and rings */
 	for (i = 0; i < priv->tx_ring_num; i++) {
 		/* Configure cq */
 		cq = &priv->tx_cq[i];
+		cq->vector = priv->tx_vector;
 		err = mlx4_en_activate_cq(priv, cq);
 		if (err) {
 			en_err(priv, "Failed allocating Tx CQ\n");
@@ -659,24 +726,22 @@ int mlx4_en_start_port(struct net_device *dev)
 		en_err(priv, "Failed setting default qp numbers\n");
 		goto tx_err;
 	}
-	/* Set port mac number */
-	en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
-	err = mlx4_register_mac(mdev->dev, priv->port,
-				priv->mac, &priv->mac_index);
-	if (err) {
-		en_err(priv, "Failed setting port mac\n");
-		goto tx_err;
-	}
-	mdev->mac_removed[priv->port] = 0;
 
 	/* Init port */
 	en_dbg(HW, priv, "Initializing port\n");
 	err = mlx4_INIT_PORT(mdev->dev, priv->port);
 	if (err) {
 		en_err(priv, "Failed Initializing port\n");
-		goto mac_err;
+		goto tx_err;
 	}
 
+	/* Attach rx QP to bradcast address */
+	memset(&mc_list[10], 0xff, ETH_ALEN);
+	mc_list[5] = priv->port;
+	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
+				  0, MLX4_PROT_ETH))
+		mlx4_warn(mdev, "Failed Attaching Broadcast\n");
+
 	/* Schedule multicast task to populate multicast list */
 	queue_work(mdev->workqueue, &priv->mcast_task);
 
@@ -684,8 +749,6 @@ int mlx4_en_start_port(struct net_device *dev)
 	netif_tx_start_all_queues(dev);
 	return 0;
 
-mac_err:
-	mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
 tx_err:
 	while (tx_index--) {
 		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
@@ -693,6 +756,8 @@ tx_err:
 	}
 
 	mlx4_en_release_rss_steer(priv);
+mac_err:
+	mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
 cq_err:
 	while (rx_index--)
 		mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
@@ -708,6 +773,7 @@ void mlx4_en_stop_port(struct net_device *dev)
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
 	int i;
+	u8 mc_list[16] = {0};
 
 	if (!priv->port_up) {
 		en_dbg(DRV, priv, "stop port called while port already down\n");
@@ -722,8 +788,23 @@ void mlx4_en_stop_port(struct net_device *dev)
 	/* Set port as not active */
 	priv->port_up = false;
 
+	/* Detach All multicasts */
+	memset(&mc_list[10], 0xff, ETH_ALEN);
+	mc_list[5] = priv->port;
+	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
+			      MLX4_PROT_ETH);
+	for (i = 0; i < priv->mc_addrs_cnt; i++) {
+		memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
+		mc_list[5] = priv->port;
+		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
+				      mc_list, MLX4_PROT_ETH);
+	}
+	mlx4_en_clear_list(dev);
+	/* Flush multicast filter */
+	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
+
 	/* Unregister Mac address for the port */
-	mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
+	mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
 	mdev->mac_removed[priv->port] = 1;
 
 	/* Free TX Rings */
@@ -801,7 +882,6 @@ static int mlx4_en_open(struct net_device *dev)
 		priv->rx_ring[i].packets = 0;
 	}
 
-	mlx4_en_set_default_moderation(priv);
 	err = mlx4_en_start_port(dev);
 	if (err)
 		en_err(priv, "Failed starting port:%d\n", priv->port);
@@ -828,7 +908,7 @@ static int mlx4_en_close(struct net_device *dev)
 	return 0;
 }
 
-void mlx4_en_free_resources(struct mlx4_en_priv *priv)
+void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors)
 {
 	int i;
 
@@ -836,14 +916,14 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 		if (priv->tx_ring[i].tx_info)
 			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
 		if (priv->tx_cq[i].buf)
-			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
+			mlx4_en_destroy_cq(priv, &priv->tx_cq[i], reserve_vectors);
 	}
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		if (priv->rx_ring[i].rx_info)
 			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]);
 		if (priv->rx_cq[i].buf)
-			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
+			mlx4_en_destroy_cq(priv, &priv->rx_cq[i], reserve_vectors);
 	}
 }
 
@@ -851,6 +931,13 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 {
 	struct mlx4_en_port_profile *prof = priv->prof;
 	int i;
+	int base_tx_qpn, err;
+
+	err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &base_tx_qpn);
+	if (err) {
+		en_err(priv, "failed reserving range for TX rings\n");
+		return err;
+	}
 
 	/* Create tx Rings */
 	for (i = 0; i < priv->tx_ring_num; i++) {
@@ -858,7 +945,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 				      prof->tx_ring_size, i, TX))
 			goto err;
 
-		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
+		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], base_tx_qpn + i,
 					   prof->tx_ring_size, TXBB_SIZE))
 			goto err;
 	}
@@ -878,6 +965,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 
 err:
 	en_err(priv, "Failed to allocate NIC resources\n");
+	mlx4_qp_release_range(priv->mdev->dev, base_tx_qpn, priv->tx_ring_num);
 	return -ENOMEM;
 }
 
@@ -905,7 +993,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
 	mdev->pndev[priv->port] = NULL;
 	mutex_unlock(&mdev->state_lock);
 
-	mlx4_en_free_resources(priv);
+	mlx4_en_free_resources(priv, false);
 	free_netdev(dev);
 }
 
@@ -932,7 +1020,6 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
 		en_dbg(DRV, priv, "Change MTU called with card down!?\n");
 	} else {
 		mlx4_en_stop_port(dev);
-		mlx4_en_set_default_moderation(priv);
 		err = mlx4_en_start_port(dev);
 		if (err) {
 			en_err(priv, "Failed restarting port:%d\n",
@@ -1079,7 +1166,25 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
 	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
 
+	/* Configure port */
+	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
+				    MLX4_EN_MIN_MTU,
+				    0, 0, 0, 0);
+	if (err) {
+		en_err(priv, "Failed setting port general configurations "
+		       "for port %d, with error %d\n", priv->port, err);
+		goto out;
+	}
+
+	/* Init port */
+	en_warn(priv, "Initializing port\n");
+	err = mlx4_INIT_PORT(mdev->dev, priv->port);
+	if (err) {
+		en_err(priv, "Failed Initializing port\n");
+		goto out;
+	}
 	priv->registered = 1;
+	mlx4_en_set_default_moderation(priv);
 	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
 	return 0;
 
@@ -119,6 +119,10 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
 	struct mlx4_set_port_rqp_calc_context *context;
 	int err;
 	u32 in_mod;
+	u32 m_promisc = (dev->caps.vep_mc_steering) ? MCAST_DIRECT : MCAST_DEFAULT;
+
+	if (dev->caps.vep_mc_steering && dev->caps.vep_uc_steering)
+		return 0;
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox))
@@ -127,8 +131,11 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
 	memset(context, 0, sizeof *context);
 
 	context->base_qpn = cpu_to_be32(base_qpn);
-	context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_EN_SHIFT | base_qpn);
-	context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_MODE_SHIFT | base_qpn);
+	context->n_mac = 0x7;
+	context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
+				       base_qpn);
+	context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
+				     base_qpn);
 	context->intra_no_vlan = 0;
 	context->no_vlan = MLX4_NO_VLAN_IDX;
 	context->intra_vlan_miss = 0;
@@ -206,7 +213,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 	}
 	stats->tx_packets = 0;
 	stats->tx_bytes = 0;
-	for (i = 0; i <= priv->tx_ring_num; i++) {
+	for (i = 0; i < priv->tx_ring_num; i++) {
 		stats->tx_packets += priv->tx_ring[i].packets;
 		stats->tx_bytes += priv->tx_ring[i].bytes;
 	}
@ -36,8 +36,8 @@
|
|||||||
|
|
||||||
|
|
||||||
#define SET_PORT_GEN_ALL_VALID 0x7
|
#define SET_PORT_GEN_ALL_VALID 0x7
|
||||||
#define SET_PORT_PROMISC_EN_SHIFT 31
|
#define SET_PORT_PROMISC_SHIFT 31
|
||||||
#define SET_PORT_PROMISC_MODE_SHIFT 30
|
#define SET_PORT_MC_PROMISC_SHIFT 30
|
||||||
|
|
||||||
enum {
|
enum {
|
||||||
MLX4_CMD_SET_VLAN_FLTR = 0x47,
|
MLX4_CMD_SET_VLAN_FLTR = 0x47,
|
||||||
@ -45,6 +45,12 @@ enum {
|
|||||||
MLX4_CMD_DUMP_ETH_STATS = 0x49,
|
MLX4_CMD_DUMP_ETH_STATS = 0x49,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
enum {
|
||||||
|
MCAST_DIRECT_ONLY = 0,
|
||||||
|
MCAST_DIRECT = 1,
|
||||||
|
MCAST_DEFAULT = 2
|
||||||
|
};
|
||||||
|
|
||||||
struct mlx4_set_port_general_context {
|
struct mlx4_set_port_general_context {
|
||||||
u8 reserved[3];
|
u8 reserved[3];
|
||||||
u8 flags;
|
u8 flags;
|
||||||
@ -60,14 +66,17 @@ struct mlx4_set_port_general_context {
|
|||||||
|
|
||||||
struct mlx4_set_port_rqp_calc_context {
|
struct mlx4_set_port_rqp_calc_context {
|
||||||
__be32 base_qpn;
|
__be32 base_qpn;
|
||||||
__be32 flags;
|
u8 rererved;
|
||||||
u8 reserved[3];
|
u8 n_mac;
|
||||||
|
u8 n_vlan;
|
||||||
|
u8 n_prio;
|
||||||
|
u8 reserved2[3];
|
||||||
u8 mac_miss;
|
u8 mac_miss;
|
||||||
u8 intra_no_vlan;
|
u8 intra_no_vlan;
|
||||||
u8 no_vlan;
|
u8 no_vlan;
|
||||||
u8 intra_vlan_miss;
|
u8 intra_vlan_miss;
|
||||||
u8 vlan_miss;
|
u8 vlan_miss;
|
||||||
u8 reserved2[3];
|
u8 reserved3[3];
|
||||||
u8 no_vlan_prio;
|
u8 no_vlan_prio;
|
||||||
__be32 promisc;
|
__be32 promisc;
|
||||||
__be32 mcast;
|
__be32 mcast;
|
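[Editor's note] The re-typed header of mlx4_set_port_rqp_calc_context swaps one __be32 plus a 3-byte pad for seven u8 fields, so the byte layout the firmware sees is unchanged (7 bytes between base_qpn and mac_miss either way; the "rererved" spelling is in the source itself). A standalone size check under that reading, with stdint stand-ins for the kernel types (compile with cc -std=c11 -c):

    #include <stdint.h>

    struct old_hdr {                /* __be32 flags; u8 reserved[3]; */
            uint32_t base_qpn;
            uint32_t flags;
            uint8_t  reserved[3];
            uint8_t  mac_miss;
    };

    struct new_hdr {                /* split into per-field counters */
            uint32_t base_qpn;
            uint8_t  rererved;
            uint8_t  n_mac;
            uint8_t  n_vlan;
            uint8_t  n_prio;
            uint8_t  reserved2[3];
            uint8_t  mac_miss;
    };

    _Static_assert(sizeof(struct old_hdr) == sizeof(struct new_hdr),
                   "field split must not change the context size");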
@@ -845,16 +845,10 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 	}
 
 	/* Configure RSS indirection qp */
-	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn);
-	if (err) {
-		en_err(priv, "Failed to reserve range for RSS "
-			     "indirection qp\n");
-		goto rss_err;
-	}
 	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
 	if (err) {
 		en_err(priv, "Failed to allocate RSS indirection QP\n");
-		goto reserve_err;
+		goto rss_err;
 	}
 	rss_map->indir_qp.event = mlx4_en_sqp_event;
 	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
@@ -881,8 +875,6 @@ indir_err:
 		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
 	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
 	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
-reserve_err:
-	mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);
 rss_err:
 	for (i = 0; i < good_qps; i++) {
 		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
@@ -904,7 +896,6 @@ void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
 		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
 	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
 	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
-	mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
@@ -44,6 +44,7 @@
 
 enum {
 	MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
+	MAX_BF = 256,
 };
 
 static int inline_thold __read_mostly = MAX_INLINE;
@@ -52,7 +53,7 @@ module_param_named(inline_thold, inline_thold, int, 0444);
 MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
 
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
-			   struct mlx4_en_tx_ring *ring, u32 size,
+			   struct mlx4_en_tx_ring *ring, int qpn, u32 size,
 			   u16 stride)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
@@ -103,23 +104,25 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 	       "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
 	       ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
 
-	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn);
-	if (err) {
-		en_err(priv, "Failed reserving qp for tx ring.\n");
-		goto err_map;
-	}
-
+	ring->qpn = qpn;
 	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
 	if (err) {
 		en_err(priv, "Failed allocating qp %d\n", ring->qpn);
-		goto err_reserve;
+		goto err_map;
 	}
 	ring->qp.event = mlx4_en_sqp_event;
 
+	err = mlx4_bf_alloc(mdev->dev, &ring->bf);
+	if (err) {
+		en_dbg(DRV, priv, "working without blueflame (%d)", err);
+		ring->bf.uar = &mdev->priv_uar;
+		ring->bf.uar->map = mdev->uar_map;
+		ring->bf_enabled = false;
+	} else
+		ring->bf_enabled = true;
+
 	return 0;
 
-err_reserve:
-	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
 err_map:
 	mlx4_en_unmap_buffer(&ring->wqres.buf);
 err_hwq_res:
@@ -139,6 +142,8 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
 	struct mlx4_en_dev *mdev = priv->mdev;
 	en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
 
+	if (ring->bf_enabled)
+		mlx4_bf_free(mdev->dev, &ring->bf);
 	mlx4_qp_remove(mdev->dev, &ring->qp);
 	mlx4_qp_free(mdev->dev, &ring->qp);
 	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
@@ -171,6 +176,8 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
 
 	mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
 				ring->cqn, &ring->context);
+	if (ring->bf_enabled)
+		ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);
 
 	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
 			       &ring->qp, &ring->qp_state);
@@ -591,6 +598,11 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
 	return skb_tx_hash(dev, skb);
 }
 
+static void mlx4_bf_copy(unsigned long *dst, unsigned long *src, unsigned bytecnt)
+{
+	__iowrite64_copy(dst, src, bytecnt / 8);
+}
+
 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -609,12 +621,13 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	int desc_size;
 	int real_size;
 	dma_addr_t dma;
-	u32 index;
+	u32 index, bf_index;
 	__be32 op_own;
 	u16 vlan_tag = 0;
 	int i;
 	int lso_header_size;
 	void *fragptr;
+	bool bounce = false;
 
 	if (!priv->port_up)
 		goto tx_drop;
@@ -657,13 +670,16 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* Packet is good - grab an index and transmit it */
 	index = ring->prod & ring->size_mask;
+	bf_index = ring->prod;
 
 	/* See if we have enough space for whole descriptor TXBB for setting
 	 * SW ownership on next descriptor; if not, use a bounce buffer. */
 	if (likely(index + nr_txbb <= ring->size))
 		tx_desc = ring->buf + index * TXBB_SIZE;
-	else
+	else {
 		tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
+		bounce = true;
+	}
 
 	/* Save skb in tx_info ring */
 	tx_info = &ring->tx_info[index];
@@ -768,21 +784,37 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	ring->prod += nr_txbb;
 
 	/* If we used a bounce buffer then copy descriptor back into place */
-	if (tx_desc == (struct mlx4_en_tx_desc *) ring->bounce_buf)
+	if (bounce)
 		tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
 
 	/* Run destructor before passing skb to HW */
 	if (likely(!skb_shared(skb)))
 		skb_orphan(skb);
 
-	/* Ensure new descirptor hits memory
-	 * before setting ownership of this descriptor to HW */
-	wmb();
-	tx_desc->ctrl.owner_opcode = op_own;
+	if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
+		*(u32 *) (&tx_desc->ctrl.vlan_tag) |= ring->doorbell_qpn;
+		op_own |= htonl((bf_index & 0xffff) << 8);
+		/* Ensure new descirptor hits memory
+		 * before setting ownership of this descriptor to HW */
+		wmb();
+		tx_desc->ctrl.owner_opcode = op_own;
 
-	/* Ring doorbell! */
-	wmb();
-	writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL);
+		wmb();
+
+		mlx4_bf_copy(ring->bf.reg + ring->bf.offset, (unsigned long *) &tx_desc->ctrl,
+			     desc_size);
+
+		wmb();
+
+		ring->bf.offset ^= ring->bf.buf_size;
+	} else {
+		/* Ensure new descirptor hits memory
+		 * before setting ownership of this descriptor to HW */
+		wmb();
+		tx_desc->ctrl.owner_opcode = op_own;
+		wmb();
+		writel(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
+	}
 
 	/* Poll CQ here */
 	mlx4_en_xmit_poll(priv, tx_ind);
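[Editor's note] In the blue-flame path added above, the WQE's control segment is written straight into the adapter's blue-flame register window instead of ringing the UAR doorbell, and ring->bf.offset ^= ring->bf.buf_size then ping-pongs between two copy slots so the next send never overwrites a slot the HCA may still be reading. mlx4_bf_copy() divides bytecnt by 8 for __iowrite64_copy(), so the path also relies on desc_size being a multiple of 8 bytes, which TXBB-sized descriptors satisfy. A standalone model of the offset toggle (the slot size is illustrative):

    #include <stdio.h>

    int main(void)
    {
            unsigned buf_size = 512;   /* one blue-flame copy slot */
            unsigned offset = 0;

            for (int i = 0; i < 4; i++) {
                    printf("send %d copies to offset %u\n", i, offset);
                    offset ^= buf_size;   /* alternate 0, 512, 0, 512 */
            }
            return 0;
    }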
@@ -42,7 +42,7 @@
 #include "fw.h"
 
 enum {
-	MLX4_IRQNAME_SIZE	= 64
+	MLX4_IRQNAME_SIZE	= 32
 };
 
 enum {
@@ -317,8 +317,8 @@ static int mlx4_num_eq_uar(struct mlx4_dev *dev)
 	 * we need to map, take the difference of highest index and
 	 * the lowest index we'll use and add 1.
 	 */
-	return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 -
-		dev->caps.reserved_eqs / 4 + 1;
+	return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
+		dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1;
 }
 
 static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
@@ -496,16 +496,32 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
 static void mlx4_free_irqs(struct mlx4_dev *dev)
 {
 	struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
-	int i;
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int i, vec;
 
 	if (eq_table->have_irq)
 		free_irq(dev->pdev->irq, dev);
 
 	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
 		if (eq_table->eq[i].have_irq) {
 			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
 			eq_table->eq[i].have_irq = 0;
 		}
 
+	for (i = 0; i < dev->caps.comp_pool; i++) {
+		/*
+		 * Freeing the assigned irq's
+		 * all bits should be 0, but we need to validate
+		 */
+		if (priv->msix_ctl.pool_bm & 1ULL << i) {
+			/* NO need protecting*/
+			vec = dev->caps.num_comp_vectors + 1 + i;
+			free_irq(priv->eq_table.eq[vec].irq,
+				 &priv->eq_table.eq[vec]);
+		}
+	}
+
+
 	kfree(eq_table->irq_names);
 }
@@ -578,7 +594,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
 				 (priv->eq_table.inta_pin < 32 ? 4 : 0);
 
 	priv->eq_table.irq_names =
-		kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1),
+		kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
+					     dev->caps.comp_pool),
 			GFP_KERNEL);
 	if (!priv->eq_table.irq_names) {
 		err = -ENOMEM;
@@ -601,6 +618,22 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
 		if (err)
 			goto err_out_comp;
 
+	/*if additional completion vectors poolsize is 0 this loop will not run*/
+	for (i = dev->caps.num_comp_vectors + 1;
+	      i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {
+
+		err = mlx4_create_eq(dev, dev->caps.num_cqs -
+					  dev->caps.reserved_cqs +
+					  MLX4_NUM_SPARE_EQE,
+				     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
+				     &priv->eq_table.eq[i]);
+		if (err) {
+			--i;
+			goto err_out_unmap;
+		}
+	}
+
+
 	if (dev->flags & MLX4_FLAG_MSI_X) {
 		const char *eq_name;
 
@@ -686,7 +719,7 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
 
 	mlx4_free_irqs(dev);
 
-	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
+	for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
 		mlx4_free_eq(dev, &priv->eq_table.eq[i]);
 
 	mlx4_unmap_clr_int(dev);
@@ -743,3 +776,65 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
 	return err;
 }
 EXPORT_SYMBOL(mlx4_test_interrupts);
+
+int mlx4_assign_eq(struct mlx4_dev *dev, char* name, int * vector)
+{
+
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int vec = 0, err = 0, i;
+
+	spin_lock(&priv->msix_ctl.pool_lock);
+	for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
+		if (~priv->msix_ctl.pool_bm & 1ULL << i) {
+			priv->msix_ctl.pool_bm |= 1ULL << i;
+			vec = dev->caps.num_comp_vectors + 1 + i;
+			snprintf(priv->eq_table.irq_names +
+					vec * MLX4_IRQNAME_SIZE,
+					MLX4_IRQNAME_SIZE, "%s", name);
+			err = request_irq(priv->eq_table.eq[vec].irq,
+					  mlx4_msi_x_interrupt, 0,
+					  &priv->eq_table.irq_names[vec<<5],
+					  priv->eq_table.eq + vec);
+			if (err) {
+				/*zero out bit by fliping it*/
+				priv->msix_ctl.pool_bm ^= 1 << i;
+				vec = 0;
+				continue;
+				/*we dont want to break here*/
+			}
+			eq_set_ci(&priv->eq_table.eq[vec], 1);
+		}
+	}
+	spin_unlock(&priv->msix_ctl.pool_lock);
+
+	if (vec) {
+		*vector = vec;
+	} else {
+		*vector = 0;
+		err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
+	}
+	return err;
+}
+EXPORT_SYMBOL(mlx4_assign_eq);
+
+void mlx4_release_eq(struct mlx4_dev *dev, int vec)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	/*bm index*/
+	int i = vec - dev->caps.num_comp_vectors - 1;
+
+	if (likely(i >= 0)) {
+		/*sanity check , making sure were not trying to free irq's
+		  Belonging to a legacy EQ*/
+		spin_lock(&priv->msix_ctl.pool_lock);
+		if (priv->msix_ctl.pool_bm & 1ULL << i) {
+			free_irq(priv->eq_table.eq[vec].irq,
+				 &priv->eq_table.eq[vec]);
+			priv->msix_ctl.pool_bm &= ~(1ULL << i);
+		}
+		spin_unlock(&priv->msix_ctl.pool_lock);
+	}
+
+}
+EXPORT_SYMBOL(mlx4_release_eq);
+
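[Editor's note] mlx4_assign_eq() above hands out pooled completion vectors by scanning msix_ctl.pool_bm for a clear bit under the pool spinlock, and mlx4_release_eq() clears the bit again. One detail worth flagging: the request_irq() error path flips the bit back with "1 << i" while every other site uses "1ULL << i"; that only matters once the pool can exceed the width of int, but it is an inconsistency. A standalone model of the bitmap-pool pattern (names and sizes illustrative, not driver API):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t pool_bm;            /* one bit per pooled vector */

    static int pool_alloc(int pool_size)
    {
            for (int i = 0; i < pool_size; i++)
                    if (~pool_bm & (1ULL << i)) {   /* clear bit = free */
                            pool_bm |= 1ULL << i;
                            return i;
                    }
            return -1;                  /* exhausted, like -ENOSPC */
    }

    static void pool_free(int i)
    {
            pool_bm &= ~(1ULL << i);
    }

    int main(void)
    {
            int a = pool_alloc(4), b = pool_alloc(4);
            printf("allocated %d and %d\n", a, b);  /* 0 and 1 */
            pool_free(a);
            printf("next: %d\n", pool_alloc(4));    /* 0 again */
            return 0;
    }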
||||||
|
@ -274,8 +274,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
|
|||||||
dev_cap->stat_rate_support = stat_rate;
|
dev_cap->stat_rate_support = stat_rate;
|
||||||
MLX4_GET(field, outbox, QUERY_DEV_CAP_UDP_RSS_OFFSET);
|
MLX4_GET(field, outbox, QUERY_DEV_CAP_UDP_RSS_OFFSET);
|
||||||
dev_cap->udp_rss = field & 0x1;
|
dev_cap->udp_rss = field & 0x1;
|
||||||
|
dev_cap->vep_uc_steering = field & 0x2;
|
||||||
|
dev_cap->vep_mc_steering = field & 0x4;
|
||||||
MLX4_GET(field, outbox, QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET);
|
MLX4_GET(field, outbox, QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET);
|
||||||
dev_cap->loopback_support = field & 0x1;
|
dev_cap->loopback_support = field & 0x1;
|
||||||
|
dev_cap->wol = field & 0x40;
|
||||||
MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
|
MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
|
||||||
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
|
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
|
||||||
dev_cap->reserved_uars = field >> 4;
|
dev_cap->reserved_uars = field >> 4;
|
||||||
@ -737,6 +740,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
|
|||||||
#define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00)
|
#define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00)
|
||||||
#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
|
#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
|
||||||
#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16)
|
#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16)
|
||||||
|
#define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18)
|
||||||
#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
|
#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
|
||||||
#define INIT_HCA_TPT_OFFSET 0x0f0
|
#define INIT_HCA_TPT_OFFSET 0x0f0
|
||||||
#define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00)
|
#define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00)
|
||||||
@ -797,6 +801,8 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
|
|||||||
MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
|
MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
|
||||||
MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
|
MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
|
||||||
MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
|
MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
|
||||||
|
if (dev->caps.vep_mc_steering)
|
||||||
|
MLX4_PUT(inbox, (u8) (1 << 3), INIT_HCA_UC_STEERING_OFFSET);
|
||||||
MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
|
MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
|
||||||
|
|
||||||
/* TPT attributes */
|
/* TPT attributes */
|
||||||
@ -908,3 +914,22 @@ int mlx4_NOP(struct mlx4_dev *dev)
|
|||||||
/* Input modifier of 0x1f means "finish as soon as possible." */
|
/* Input modifier of 0x1f means "finish as soon as possible." */
|
||||||
return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100);
|
return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#define MLX4_WOL_SETUP_MODE (5 << 28)
|
||||||
|
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
|
||||||
|
{
|
||||||
|
u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
|
||||||
|
|
||||||
|
return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
|
||||||
|
MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(mlx4_wol_read);
|
||||||
|
|
||||||
|
int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
|
||||||
|
{
|
||||||
|
u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
|
||||||
|
|
||||||
|
return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
|
||||||
|
MLX4_CMD_TIME_CLASS_A);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(mlx4_wol_write);
|
||||||
|
@ -80,6 +80,9 @@ struct mlx4_dev_cap {
|
|||||||
u16 stat_rate_support;
|
u16 stat_rate_support;
|
||||||
int udp_rss;
|
int udp_rss;
|
||||||
int loopback_support;
|
int loopback_support;
|
||||||
|
int vep_uc_steering;
|
||||||
|
int vep_mc_steering;
|
||||||
|
int wol;
|
||||||
u32 flags;
|
u32 flags;
|
||||||
int reserved_uars;
|
int reserved_uars;
|
||||||
int uar_size;
|
int uar_size;
|
||||||
|
@ -39,6 +39,7 @@
|
|||||||
#include <linux/pci.h>
|
#include <linux/pci.h>
|
||||||
#include <linux/dma-mapping.h>
|
#include <linux/dma-mapping.h>
|
||||||
#include <linux/slab.h>
|
#include <linux/slab.h>
|
||||||
|
#include <linux/io-mapping.h>
|
||||||
|
|
||||||
#include <linux/mlx4/device.h>
|
#include <linux/mlx4/device.h>
|
||||||
#include <linux/mlx4/doorbell.h>
|
#include <linux/mlx4/doorbell.h>
|
||||||
@ -227,6 +228,9 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
|
|||||||
dev->caps.stat_rate_support = dev_cap->stat_rate_support;
|
dev->caps.stat_rate_support = dev_cap->stat_rate_support;
|
||||||
dev->caps.udp_rss = dev_cap->udp_rss;
|
dev->caps.udp_rss = dev_cap->udp_rss;
|
||||||
dev->caps.loopback_support = dev_cap->loopback_support;
|
dev->caps.loopback_support = dev_cap->loopback_support;
|
||||||
|
dev->caps.vep_uc_steering = dev_cap->vep_uc_steering;
|
||||||
|
dev->caps.vep_mc_steering = dev_cap->vep_mc_steering;
|
||||||
|
dev->caps.wol = dev_cap->wol;
|
||||||
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
|
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
|
||||||
|
|
||||||
dev->caps.log_num_macs = log_num_mac;
|
dev->caps.log_num_macs = log_num_mac;
|
||||||
@ -718,8 +722,31 @@ static void mlx4_free_icms(struct mlx4_dev *dev)
|
|||||||
mlx4_free_icm(dev, priv->fw.aux_icm, 0);
|
mlx4_free_icm(dev, priv->fw.aux_icm, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int map_bf_area(struct mlx4_dev *dev)
|
||||||
|
{
|
||||||
|
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||||
|
resource_size_t bf_start;
|
||||||
|
resource_size_t bf_len;
|
||||||
|
int err = 0;
|
||||||
|
|
||||||
|
bf_start = pci_resource_start(dev->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT);
|
||||||
|
bf_len = pci_resource_len(dev->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT);
|
||||||
|
priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
|
||||||
|
if (!priv->bf_mapping)
|
||||||
|
err = -ENOMEM;
|
||||||
|
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void unmap_bf_area(struct mlx4_dev *dev)
|
||||||
|
{
|
||||||
|
if (mlx4_priv(dev)->bf_mapping)
|
||||||
|
io_mapping_free(mlx4_priv(dev)->bf_mapping);
|
||||||
|
}
|
||||||
|
|
||||||
static void mlx4_close_hca(struct mlx4_dev *dev)
|
static void mlx4_close_hca(struct mlx4_dev *dev)
|
||||||
{
|
{
|
||||||
|
unmap_bf_area(dev);
|
||||||
mlx4_CLOSE_HCA(dev, 0);
|
mlx4_CLOSE_HCA(dev, 0);
|
||||||
mlx4_free_icms(dev);
|
mlx4_free_icms(dev);
|
||||||
mlx4_UNMAP_FA(dev);
|
mlx4_UNMAP_FA(dev);
|
||||||
@ -772,6 +799,9 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
|
|||||||
goto err_stop_fw;
|
goto err_stop_fw;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (map_bf_area(dev))
|
||||||
|
mlx4_dbg(dev, "Failed to map blue flame area\n");
|
||||||
|
|
||||||
init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
|
init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
|
||||||
|
|
||||||
err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
|
err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
|
||||||
@ -802,6 +832,7 @@ err_free_icm:
|
|||||||
mlx4_free_icms(dev);
|
mlx4_free_icms(dev);
|
||||||
|
|
||||||
err_stop_fw:
|
err_stop_fw:
|
||||||
|
unmap_bf_area(dev);
|
||||||
mlx4_UNMAP_FA(dev);
|
mlx4_UNMAP_FA(dev);
|
||||||
mlx4_free_icm(dev, priv->fw.fw_icm, 0);
|
mlx4_free_icm(dev, priv->fw.fw_icm, 0);
|
||||||
|
|
||||||
@ -969,13 +1000,15 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
|
|||||||
{
|
{
|
||||||
struct mlx4_priv *priv = mlx4_priv(dev);
|
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||||
struct msix_entry *entries;
|
struct msix_entry *entries;
|
||||||
int nreq;
|
int nreq = min_t(int, dev->caps.num_ports *
|
||||||
|
min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT)
|
||||||
|
+ MSIX_LEGACY_SZ, MAX_MSIX);
|
||||||
int err;
|
int err;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
if (msi_x) {
|
if (msi_x) {
|
||||||
nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
|
nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
|
||||||
num_possible_cpus() + 1);
|
nreq);
|
||||||
entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
|
entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
|
||||||
if (!entries)
|
if (!entries)
|
||||||
goto no_msi;
|
goto no_msi;
|
||||||
@ -998,7 +1031,15 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
|
|||||||
goto no_msi;
|
goto no_msi;
|
||||||
}
|
}
|
||||||
|
|
||||||
dev->caps.num_comp_vectors = nreq - 1;
|
if (nreq <
|
||||||
|
MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
|
||||||
|
/*Working in legacy mode , all EQ's shared*/
|
||||||
|
dev->caps.comp_pool = 0;
|
||||||
|
dev->caps.num_comp_vectors = nreq - 1;
|
||||||
|
} else {
|
||||||
|
dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ;
|
||||||
|
dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
|
||||||
|
}
|
||||||
for (i = 0; i < nreq; ++i)
|
for (i = 0; i < nreq; ++i)
|
||||||
priv->eq_table.eq[i].irq = entries[i].vector;
|
priv->eq_table.eq[i].irq = entries[i].vector;
|
||||||
|
|
||||||
@ -1010,6 +1051,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
|
|||||||
|
|
||||||
no_msi:
|
no_msi:
|
||||||
dev->caps.num_comp_vectors = 1;
|
dev->caps.num_comp_vectors = 1;
|
||||||
|
dev->caps.comp_pool = 0;
|
||||||
|
|
||||||
for (i = 0; i < 2; ++i)
|
for (i = 0; i < 2; ++i)
|
||||||
priv->eq_table.eq[i].irq = dev->pdev->irq;
|
priv->eq_table.eq[i].irq = dev->pdev->irq;
|
||||||
@ -1049,6 +1091,59 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
|
|||||||
device_remove_file(&info->dev->pdev->dev, &info->port_attr);
|
device_remove_file(&info->dev->pdev->dev, &info->port_attr);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int mlx4_init_steering(struct mlx4_dev *dev)
|
||||||
|
{
|
||||||
|
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||||
|
int num_entries = dev->caps.num_ports;
|
||||||
|
int i, j;
|
||||||
|
|
||||||
|
priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
|
||||||
|
if (!priv->steer)
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
|
for (i = 0; i < num_entries; i++) {
|
||||||
|
for (j = 0; j < MLX4_NUM_STEERS; j++) {
|
||||||
|
INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
|
||||||
|
INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
|
||||||
|
}
|
||||||
|
INIT_LIST_HEAD(&priv->steer[i].high_prios);
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void mlx4_clear_steering(struct mlx4_dev *dev)
|
||||||
|
{
|
||||||
|
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||||
|
struct mlx4_steer_index *entry, *tmp_entry;
|
||||||
|
struct mlx4_promisc_qp *pqp, *tmp_pqp;
|
||||||
|
int num_entries = dev->caps.num_ports;
|
||||||
|
int i, j;
|
||||||
|
|
||||||
|
for (i = 0; i < num_entries; i++) {
|
||||||
|
for (j = 0; j < MLX4_NUM_STEERS; j++) {
|
||||||
|
list_for_each_entry_safe(pqp, tmp_pqp,
|
||||||
|
&priv->steer[i].promisc_qps[j],
|
||||||
|
list) {
|
||||||
|
list_del(&pqp->list);
|
||||||
|
kfree(pqp);
|
||||||
|
}
|
||||||
|
list_for_each_entry_safe(entry, tmp_entry,
|
||||||
|
&priv->steer[i].steer_entries[j],
|
||||||
|
list) {
|
||||||
|
list_del(&entry->list);
|
||||||
|
list_for_each_entry_safe(pqp, tmp_pqp,
|
||||||
|
&entry->duplicates,
|
||||||
|
list) {
|
||||||
|
list_del(&pqp->list);
|
||||||
|
kfree(pqp);
|
||||||
|
}
|
||||||
|
kfree(entry);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
kfree(priv->steer);
|
||||||
|
}
|
||||||
|
|
||||||
static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||||
{
|
{
|
||||||
struct mlx4_priv *priv;
|
struct mlx4_priv *priv;
|
||||||
@ -1130,6 +1225,11 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
|||||||
INIT_LIST_HEAD(&priv->pgdir_list);
|
INIT_LIST_HEAD(&priv->pgdir_list);
|
||||||
mutex_init(&priv->pgdir_mutex);
|
mutex_init(&priv->pgdir_mutex);
|
||||||
|
|
||||||
|
pci_read_config_byte(pdev, PCI_REVISION_ID, &dev->rev_id);
|
||||||
|
|
||||||
|
INIT_LIST_HEAD(&priv->bf_list);
|
||||||
|
mutex_init(&priv->bf_mutex);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Now reset the HCA before we touch the PCI capabilities or
|
* Now reset the HCA before we touch the PCI capabilities or
|
||||||
* attempt a firmware command, since a boot ROM may have left
|
* attempt a firmware command, since a boot ROM may have left
|
||||||
@ -1154,8 +1254,15 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
|||||||
if (err)
|
if (err)
|
||||||
goto err_close;
|
goto err_close;
|
||||||
|
|
||||||
|
priv->msix_ctl.pool_bm = 0;
|
||||||
|
spin_lock_init(&priv->msix_ctl.pool_lock);
|
||||||
|
|
||||||
mlx4_enable_msi_x(dev);
|
mlx4_enable_msi_x(dev);
|
||||||
|
|
||||||
|
err = mlx4_init_steering(dev);
|
||||||
|
if (err)
|
||||||
|
goto err_free_eq;
|
||||||
|
|
||||||
err = mlx4_setup_hca(dev);
|
err = mlx4_setup_hca(dev);
|
||||||
if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
|
if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
|
||||||
dev->flags &= ~MLX4_FLAG_MSI_X;
|
dev->flags &= ~MLX4_FLAG_MSI_X;
|
||||||
@ -1164,7 +1271,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (err)
|
if (err)
|
||||||
goto err_free_eq;
|
goto err_steer;
|
||||||
|
|
||||||
for (port = 1; port <= dev->caps.num_ports; port++) {
|
for (port = 1; port <= dev->caps.num_ports; port++) {
|
||||||
err = mlx4_init_port_info(dev, port);
|
err = mlx4_init_port_info(dev, port);
|
||||||
@ -1197,6 +1304,9 @@ err_port:
|
|||||||
mlx4_cleanup_pd_table(dev);
|
mlx4_cleanup_pd_table(dev);
|
||||||
mlx4_cleanup_uar_table(dev);
|
mlx4_cleanup_uar_table(dev);
|
||||||
|
|
||||||
|
err_steer:
|
||||||
|
mlx4_clear_steering(dev);
|
||||||
|
|
||||||
err_free_eq:
|
err_free_eq:
|
||||||
mlx4_free_eq_table(dev);
|
mlx4_free_eq_table(dev);
|
||||||
|
|
||||||
@ -1256,6 +1366,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
|
|||||||
iounmap(priv->kar);
|
iounmap(priv->kar);
|
||||||
mlx4_uar_free(dev, &priv->driver_uar);
|
mlx4_uar_free(dev, &priv->driver_uar);
|
||||||
mlx4_cleanup_uar_table(dev);
|
mlx4_cleanup_uar_table(dev);
|
||||||
|
mlx4_clear_steering(dev);
|
||||||
mlx4_free_eq_table(dev);
|
mlx4_free_eq_table(dev);
|
||||||
mlx4_close_hca(dev);
|
mlx4_close_hca(dev);
|
||||||
mlx4_cmd_cleanup(dev);
|
mlx4_cmd_cleanup(dev);
|
||||||
|
@ -32,6 +32,7 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
#include <linux/string.h>
|
#include <linux/string.h>
|
||||||
|
#include <linux/etherdevice.h>
|
||||||
|
|
||||||
#include <linux/mlx4/cmd.h>
|
#include <linux/mlx4/cmd.h>
|
||||||
|
|
||||||
@ -40,38 +41,40 @@
|
|||||||
#define MGM_QPN_MASK 0x00FFFFFF
|
#define MGM_QPN_MASK 0x00FFFFFF
|
||||||
#define MGM_BLCK_LB_BIT 30
|
#define MGM_BLCK_LB_BIT 30
|
||||||
|
|
||||||
struct mlx4_mgm {
|
|
||||||
__be32 next_gid_index;
|
|
||||||
__be32 members_count;
|
|
||||||
u32 reserved[2];
|
|
||||||
u8 gid[16];
|
|
||||||
__be32 qp[MLX4_QP_PER_MGM];
|
|
||||||
};
|
|
||||||
|
|
||||||
static const u8 zero_gid[16]; /* automatically initialized to 0 */
|
static const u8 zero_gid[16]; /* automatically initialized to 0 */
|
||||||
|
|
||||||
static int mlx4_READ_MCG(struct mlx4_dev *dev, int index,
|
static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
|
||||||
struct mlx4_cmd_mailbox *mailbox)
|
struct mlx4_cmd_mailbox *mailbox)
|
||||||
{
|
{
|
||||||
return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
|
return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
|
||||||
MLX4_CMD_TIME_CLASS_A);
|
MLX4_CMD_TIME_CLASS_A);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int mlx4_WRITE_MCG(struct mlx4_dev *dev, int index,
|
static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
|
||||||
struct mlx4_cmd_mailbox *mailbox)
|
struct mlx4_cmd_mailbox *mailbox)
|
||||||
{
|
{
|
||||||
return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
|
return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
|
||||||
MLX4_CMD_TIME_CLASS_A);
|
MLX4_CMD_TIME_CLASS_A);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
|
static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 vep_num, u8 port, u8 steer,
|
||||||
u16 *hash)
|
struct mlx4_cmd_mailbox *mailbox)
|
||||||
|
{
|
||||||
|
u32 in_mod;
|
||||||
|
|
||||||
|
in_mod = (u32) vep_num << 24 | (u32) port << 16 | steer << 1;
|
||||||
|
return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
|
||||||
|
MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
|
||||||
|
u16 *hash, u8 op_mod)
|
||||||
{
|
{
|
||||||
u64 imm;
|
u64 imm;
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, 0, MLX4_CMD_MGID_HASH,
|
err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
|
||||||
MLX4_CMD_TIME_CLASS_A);
|
MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A);
|
||||||
|
|
||||||
if (!err)
|
if (!err)
|
||||||
*hash = imm;
|
*hash = imm;
|
||||||
@ -79,6 +82,457 @@ static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
|
|||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num,
|
||||||
|
enum mlx4_steer_type steer,
|
||||||
|
u32 qpn)
|
||||||
|
{
|
||||||
|
struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[pf_num];
|
||||||
|
struct mlx4_promisc_qp *pqp;
|
||||||
|
|
||||||
|
list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
|
||||||
|
if (pqp->qpn == qpn)
|
||||||
|
return pqp;
|
||||||
|
}
|
||||||
|
/* not found */
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Add new entry to steering data structure.
|
||||||
|
* All promisc QPs should be added as well
|
||||||
|
*/
|
||||||
|
static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
|
||||||
|
enum mlx4_steer_type steer,
|
||||||
|
unsigned int index, u32 qpn)
|
||||||
|
{
|
||||||
|
struct mlx4_steer *s_steer;
|
||||||
|
struct mlx4_cmd_mailbox *mailbox;
|
||||||
|
struct mlx4_mgm *mgm;
|
||||||
|
u32 members_count;
|
||||||
|
struct mlx4_steer_index *new_entry;
|
||||||
|
struct mlx4_promisc_qp *pqp;
|
||||||
|
struct mlx4_promisc_qp *dqp;
|
||||||
|
u32 prot;
|
||||||
|
int err;
|
||||||
|
u8 pf_num;
|
||||||
|
|
||||||
|
pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
|
||||||
|
s_steer = &mlx4_priv(dev)->steer[pf_num];
|
||||||
|
new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
|
||||||
|
if (!new_entry)
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
|
INIT_LIST_HEAD(&new_entry->duplicates);
|
||||||
|
new_entry->index = index;
|
||||||
|
list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]);
|
||||||
|
|
||||||
|
/* If the given qpn is also a promisc qp,
|
||||||
|
* it should be inserted to duplicates list
|
||||||
|
*/
|
||||||
|
pqp = get_promisc_qp(dev, pf_num, steer, qpn);
|
||||||
|
if (pqp) {
|
||||||
|
dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
|
||||||
|
if (!dqp) {
|
||||||
|
err = -ENOMEM;
|
||||||
|
goto out_alloc;
|
||||||
|
}
|
||||||
|
dqp->qpn = qpn;
|
||||||
|
list_add_tail(&dqp->list, &new_entry->duplicates);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* if no promisc qps for this vep, we are done */
|
||||||
|
if (list_empty(&s_steer->promisc_qps[steer]))
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
/* now need to add all the promisc qps to the new
|
||||||
|
* steering entry, as they should also receive the packets
|
||||||
|
* destined to this address */
|
||||||
|
mailbox = mlx4_alloc_cmd_mailbox(dev);
|
||||||
|
if (IS_ERR(mailbox)) {
|
||||||
|
err = -ENOMEM;
|
||||||
|
goto out_alloc;
|
||||||
|
}
|
||||||
|
mgm = mailbox->buf;
|
||||||
|
|
||||||
|
err = mlx4_READ_ENTRY(dev, index, mailbox);
|
||||||
|
if (err)
|
||||||
|
goto out_mailbox;
|
||||||
|
|
||||||
|
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
|
||||||
|
prot = be32_to_cpu(mgm->members_count) >> 30;
|
||||||
|
list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
|
||||||
|
/* don't add already existing qpn */
|
||||||
|
if (pqp->qpn == qpn)
|
||||||
|
continue;
|
||||||
|
if (members_count == MLX4_QP_PER_MGM) {
|
||||||
|
/* out of space */
|
||||||
|
err = -ENOMEM;
|
||||||
|
goto out_mailbox;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* add the qpn */
|
||||||
|
mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK);
|
||||||
|
}
|
||||||
|
/* update the qps count and update the entry with all the promisc qps*/
|
||||||
|
mgm->members_count = cpu_to_be32(members_count | (prot << 30));
|
||||||
|
err = mlx4_WRITE_ENTRY(dev, index, mailbox);
|
||||||
|
|
||||||
|
out_mailbox:
|
||||||
|
mlx4_free_cmd_mailbox(dev, mailbox);
|
||||||
|
if (!err)
|
||||||
|
return 0;
|
||||||
|
out_alloc:
|
||||||
|
if (dqp) {
|
||||||
|
list_del(&dqp->list);
|
||||||
|
kfree(&dqp);
|
||||||
|
}
|
||||||
|
list_del(&new_entry->list);
|
||||||
|
kfree(new_entry);
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* update the data structures with existing steering entry */
|
||||||
|
static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
|
||||||
|
enum mlx4_steer_type steer,
|
||||||
|
unsigned int index, u32 qpn)
|
||||||
|
{
|
||||||
|
struct mlx4_steer *s_steer;
|
||||||
|
struct mlx4_steer_index *tmp_entry, *entry = NULL;
|
||||||
|
struct mlx4_promisc_qp *pqp;
|
||||||
|
struct mlx4_promisc_qp *dqp;
|
||||||
|
u8 pf_num;
|
||||||
|
|
||||||
|
pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
|
||||||
|
s_steer = &mlx4_priv(dev)->steer[pf_num];
|
||||||
|
|
||||||
|
pqp = get_promisc_qp(dev, pf_num, steer, qpn);
|
||||||
|
if (!pqp)
|
||||||
|
return 0; /* nothing to do */
|
||||||
|
|
||||||
|
list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
|
||||||
|
if (tmp_entry->index == index) {
|
||||||
|
entry = tmp_entry;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (unlikely(!entry)) {
|
||||||
|
mlx4_warn(dev, "Steering entry at index %x is not registered\n", index);
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* the given qpn is listed as a promisc qpn
|
||||||
|
* we need to add it as a duplicate to this entry
|
||||||
|
* for future refernce */
|
||||||
|
list_for_each_entry(dqp, &entry->duplicates, list) {
|
||||||
|
if (qpn == dqp->qpn)
|
||||||
|
return 0; /* qp is already duplicated */
|
||||||
|
}
|
||||||
|
|
||||||
|
/* add the qp as a duplicate on this index */
|
||||||
|
dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
|
||||||
|
if (!dqp)
|
||||||
|
return -ENOMEM;
|
||||||
|
dqp->qpn = qpn;
|
||||||
|
list_add_tail(&dqp->list, &entry->duplicates);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Check whether a qpn is a duplicate on steering entry
|
||||||
|
* If so, it should not be removed from mgm */
|
||||||
|
static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
|
||||||
|
enum mlx4_steer_type steer,
|
||||||
|
unsigned int index, u32 qpn)
|
||||||
|
{
|
||||||
|
struct mlx4_steer *s_steer;
|
||||||
|
struct mlx4_steer_index *tmp_entry, *entry = NULL;
|
||||||
|
struct mlx4_promisc_qp *dqp, *tmp_dqp;
|
||||||
|
u8 pf_num;
|
||||||
|
|
||||||
|
pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
|
||||||
|
s_steer = &mlx4_priv(dev)->steer[pf_num];
|
||||||
|
|
||||||
|
/* if qp is not promisc, it cannot be duplicated */
|
||||||
|
if (!get_promisc_qp(dev, pf_num, steer, qpn))
|
||||||
|
return false;
|
||||||
|
|
||||||
|
/* The qp is promisc qp so it is a duplicate on this index
|
||||||
|
* Find the index entry, and remove the duplicate */
|
||||||
|
list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
|
||||||
|
if (tmp_entry->index == index) {
|
||||||
|
entry = tmp_entry;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (unlikely(!entry)) {
|
||||||
|
mlx4_warn(dev, "Steering entry for index %x is not registered\n", index);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) {
|
||||||
|
if (dqp->qpn == qpn) {
|
||||||
|
list_del(&dqp->list);
|
||||||
|
kfree(dqp);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* I a steering entry contains only promisc QPs, it can be removed. */
|
||||||
|
static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
|
||||||
|
enum mlx4_steer_type steer,
|
||||||
|
unsigned int index, u32 tqpn)
|
||||||
|
{
|
||||||
|
struct mlx4_steer *s_steer;
|
||||||
|
struct mlx4_cmd_mailbox *mailbox;
|
||||||
|
struct mlx4_mgm *mgm;
|
||||||
|
struct mlx4_steer_index *entry = NULL, *tmp_entry;
|
||||||
|
u32 qpn;
|
||||||
|
u32 members_count;
|
||||||
|
bool ret = false;
|
||||||
|
int i;
|
||||||
|
u8 pf_num;
|
||||||
|
|
||||||
|
pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
|
||||||
|
s_steer = &mlx4_priv(dev)->steer[pf_num];
|
||||||
|
|
||||||
|
mailbox = mlx4_alloc_cmd_mailbox(dev);
|
||||||
|
if (IS_ERR(mailbox))
|
||||||
|
return false;
|
||||||
|
mgm = mailbox->buf;
|
||||||
|
|
||||||
|
if (mlx4_READ_ENTRY(dev, index, mailbox))
|
||||||
|
goto out;
|
||||||
|
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
|
||||||
|
for (i = 0; i < members_count; i++) {
|
||||||
|
qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
|
||||||
|
if (!get_promisc_qp(dev, pf_num, steer, qpn) && qpn != tqpn) {
|
||||||
|
/* the qp is not promisc, the entry can't be removed */
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
/* All the qps currently registered for this entry are promiscuous,
|
||||||
|
* Checking for duplicates */
|
||||||
|
ret = true;
|
||||||
|
list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
|
||||||
|
if (entry->index == index) {
|
||||||
|
if (list_empty(&entry->duplicates)) {
|
||||||
|
list_del(&entry->list);
|
||||||
|
kfree(entry);
|
||||||
|
} else {
|
||||||
|
/* This entry contains duplicates so it shouldn't be removed */
|
||||||
|
ret = false;
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
out:
|
||||||
|
mlx4_free_cmd_mailbox(dev, mailbox);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
|
||||||
|
enum mlx4_steer_type steer, u32 qpn)
|
||||||
|
{
|
||||||
|
struct mlx4_steer *s_steer;
|
||||||
|
struct mlx4_cmd_mailbox *mailbox;
|
||||||
|
struct mlx4_mgm *mgm;
|
||||||
|
struct mlx4_steer_index *entry;
|
||||||
|
struct mlx4_promisc_qp *pqp;
|
||||||
|
struct mlx4_promisc_qp *dqp;
|
||||||
|
u32 members_count;
|
||||||
|
u32 prot;
|
||||||
|
int i;
|
||||||
|
bool found;
|
||||||
|
int last_index;
|
||||||
|
int err;
|
||||||
|
u8 pf_num;
|
||||||
|
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||||
|
pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
|
||||||
|
s_steer = &mlx4_priv(dev)->steer[pf_num];
|
||||||
|
|
||||||
|
mutex_lock(&priv->mcg_table.mutex);
|
||||||
|
|
||||||
|
if (get_promisc_qp(dev, pf_num, steer, qpn)) {
|
||||||
|
err = 0; /* Noting to do, already exists */
|
||||||
|
goto out_mutex;
|
||||||
|
}
|
||||||
|
|
||||||
|
pqp = kmalloc(sizeof *pqp, GFP_KERNEL);
|
||||||
|
if (!pqp) {
|
||||||
|
err = -ENOMEM;
|
||||||
|
goto out_mutex;
|
||||||
|
}
|
||||||
|
pqp->qpn = qpn;
|
||||||
|
|
||||||
|
mailbox = mlx4_alloc_cmd_mailbox(dev);
|
||||||
|
if (IS_ERR(mailbox)) {
|
||||||
|
err = -ENOMEM;
|
||||||
|
goto out_alloc;
|
||||||
|
}
|
||||||
|
mgm = mailbox->buf;
|
||||||
|
|
||||||
|
/* the promisc qp needs to be added for each one of the steering
|
||||||
|
* entries, if it already exists, needs to be added as a duplicate
|
||||||
|
* for this entry */
|
||||||
|
list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
|
||||||
|
err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
|
||||||
|
if (err)
|
||||||
|
goto out_mailbox;
|
||||||
|
|
||||||
|
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
|
||||||
|
prot = be32_to_cpu(mgm->members_count) >> 30;
|
||||||
|
found = false;
|
||||||
|
for (i = 0; i < members_count; i++) {
|
||||||
|
if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
|
||||||
|
/* Entry already exists, add to duplicates */
|
||||||
|
dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
|
||||||
|
if (!dqp)
|
||||||
|
goto out_mailbox;
|
||||||
|
dqp->qpn = qpn;
|
||||||
|
list_add_tail(&dqp->list, &entry->duplicates);
|
||||||
|
found = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (!found) {
|
||||||
|
/* Need to add the qpn to mgm */
|
||||||
|
if (members_count == MLX4_QP_PER_MGM) {
|
||||||
|
/* entry is full */
|
||||||
|
err = -ENOMEM;
|
||||||
|
goto out_mailbox;
|
||||||
|
}
|
||||||
|
mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK);
|
||||||
|
mgm->members_count = cpu_to_be32(members_count | (prot << 30));
|
||||||
|
err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
|
||||||
|
if (err)
|
||||||
|
goto out_mailbox;
|
||||||
|
}
|
||||||
|
last_index = entry->index;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* add the new qpn to list of promisc qps */
|
||||||
|
list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
|
||||||
|
/* now need to add all the promisc qps to default entry */
|
||||||
|
memset(mgm, 0, sizeof *mgm);
|
||||||
|
members_count = 0;
|
||||||
|
list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
|
||||||
|
mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
|
||||||
|
mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
|
||||||
|
|
||||||
|
err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
|
||||||
|
if (err)
|
||||||
|
goto out_list;
|
||||||
|
|
||||||
|
mlx4_free_cmd_mailbox(dev, mailbox);
|
||||||
|
mutex_unlock(&priv->mcg_table.mutex);
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
out_list:
|
||||||
|
list_del(&pqp->list);
|
||||||
|
out_mailbox:
|
||||||
|
mlx4_free_cmd_mailbox(dev, mailbox);
|
||||||
|
out_alloc:
|
||||||
|
kfree(pqp);
|
||||||
|
out_mutex:
|
||||||
|
mutex_unlock(&priv->mcg_table.mutex);
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
|
||||||
|
enum mlx4_steer_type steer, u32 qpn)
|
||||||
|
{
|
||||||
|
struct mlx4_priv *priv = mlx4_priv(dev);
|
||||||
|
struct mlx4_steer *s_steer;
|
||||||
|
struct mlx4_cmd_mailbox *mailbox;
|
||||||
|
struct mlx4_mgm *mgm;
|
||||||
|
struct mlx4_steer_index *entry;
|
||||||
|
struct mlx4_promisc_qp *pqp;
|
||||||
|
struct mlx4_promisc_qp *dqp;
|
||||||
|
u32 members_count;
|
||||||
|
bool found;
|
||||||
|
bool back_to_list = false;
|
||||||
|
int loc, i;
|
||||||
|
int err;
|
||||||
|
u8 pf_num;
|
||||||
|
|
||||||
|
pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
|
||||||
|
s_steer = &mlx4_priv(dev)->steer[pf_num];
|
||||||
|
mutex_lock(&priv->mcg_table.mutex);
|
||||||
|
|
||||||
|
pqp = get_promisc_qp(dev, pf_num, steer, qpn);
|
||||||
|
if (unlikely(!pqp)) {
|
||||||
|
mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn);
|
||||||
|
/* nothing to do */
|
||||||
|
err = 0;
|
||||||
|
goto out_mutex;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*remove from list of promisc qps */
|
||||||
|
list_del(&pqp->list);
|
||||||
|
kfree(pqp);
|
||||||
|
|
||||||
|
/* set the default entry not to include the removed one */
|
||||||
|
mailbox = mlx4_alloc_cmd_mailbox(dev);
|
||||||
|
if (IS_ERR(mailbox)) {
|
||||||
|
err = -ENOMEM;
|
||||||
|
back_to_list = true;
|
||||||
|
goto out_list;
|
||||||
|
}
|
||||||
|
mgm = mailbox->buf;
|
||||||
|
members_count = 0;
|
||||||
|
list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
|
||||||
|
mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
|
||||||
|
mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
|
||||||
|
|
||||||
|
err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
|
||||||
|
if (err)
|
||||||
|
goto out_mailbox;
|
||||||
|
|
||||||
|
/* remove the qp from all the steering entries*/
|
||||||
|
list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
|
||||||
|
found = false;
|
||||||
|
list_for_each_entry(dqp, &entry->duplicates, list) {
|
||||||
|
if (dqp->qpn == qpn) {
|
||||||
|
found = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (found) {
|
||||||
|
/* a duplicate, no need to change the mgm,
|
||||||
|
* only update the duplicates list */
|
||||||
|
list_del(&dqp->list);
|
||||||
|
kfree(dqp);
|
||||||
|
} else {
|
||||||
|
err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
|
||||||
|
if (err)
|
||||||
|
goto out_mailbox;
|
||||||
|
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
|
||||||
|
for (loc = -1, i = 0; i < members_count; ++i)
|
||||||
|
if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn)
|
||||||
|
loc = i;
|
||||||
|
|
||||||
|
mgm->members_count = cpu_to_be32(--members_count |
|
||||||
|
(MLX4_PROT_ETH << 30));
|
||||||
|
mgm->qp[loc] = mgm->qp[i - 1];
|
||||||
|
mgm->qp[i - 1] = 0;
|
||||||
|
|
||||||
|
err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
|
||||||
|
if (err)
|
||||||
|
goto out_mailbox;
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
out_mailbox:
|
||||||
|
mlx4_free_cmd_mailbox(dev, mailbox);
|
||||||
|
out_list:
|
||||||
|
if (back_to_list)
|
||||||
|
list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
|
||||||
|
out_mutex:
|
||||||
|
mutex_unlock(&priv->mcg_table.mutex);
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Caller must hold MCG table semaphore. gid and mgm parameters must
|
* Caller must hold MCG table semaphore. gid and mgm parameters must
|
||||||
* be properly aligned for command interface.
|
* be properly aligned for command interface.
|
||||||
@ -94,15 +548,17 @@ static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
|
|||||||
* If no AMGM exists for given gid, *index = -1, *prev = index of last
|
* If no AMGM exists for given gid, *index = -1, *prev = index of last
|
||||||
* entry in hash chain and *mgm holds end of hash chain.
|
* entry in hash chain and *mgm holds end of hash chain.
|
||||||
*/
|
*/
|
||||||
static int find_mgm(struct mlx4_dev *dev,
|
static int find_entry(struct mlx4_dev *dev, u8 port,
|
||||||
u8 *gid, enum mlx4_protocol protocol,
|
u8 *gid, enum mlx4_protocol prot,
|
||||||
struct mlx4_cmd_mailbox *mgm_mailbox,
|
enum mlx4_steer_type steer,
|
||||||
u16 *hash, int *prev, int *index)
|
struct mlx4_cmd_mailbox *mgm_mailbox,
|
||||||
|
u16 *hash, int *prev, int *index)
|
||||||
{
|
{
|
||||||
struct mlx4_cmd_mailbox *mailbox;
|
struct mlx4_cmd_mailbox *mailbox;
|
||||||
struct mlx4_mgm *mgm = mgm_mailbox->buf;
|
struct mlx4_mgm *mgm = mgm_mailbox->buf;
|
||||||
u8 *mgid;
|
u8 *mgid;
|
||||||
int err;
|
int err;
|
||||||
|
u8 op_mod = (prot == MLX4_PROT_ETH) ? !!(dev->caps.vep_mc_steering) : 0;
|
||||||
|
|
||||||
mailbox = mlx4_alloc_cmd_mailbox(dev);
|
mailbox = mlx4_alloc_cmd_mailbox(dev);
|
||||||
if (IS_ERR(mailbox))
|
if (IS_ERR(mailbox))
|
||||||
@ -111,7 +567,7 @@ static int find_mgm(struct mlx4_dev *dev,
|
|||||||
|
|
||||||
memcpy(mgid, gid, 16);
|
memcpy(mgid, gid, 16);
|
||||||
|
|
||||||
err = mlx4_MGID_HASH(dev, mailbox, hash);
|
err = mlx4_GID_HASH(dev, mailbox, hash, op_mod);
|
||||||
mlx4_free_cmd_mailbox(dev, mailbox);
|
mlx4_free_cmd_mailbox(dev, mailbox);
|
||||||
if (err)
|
if (err)
|
||||||
return err;
|
return err;
|
||||||
@ -123,11 +579,11 @@ static int find_mgm(struct mlx4_dev *dev,
|
|||||||
*prev = -1;
|
*prev = -1;
|
||||||
|
|
||||||
do {
|
do {
|
||||||
err = mlx4_READ_MCG(dev, *index, mgm_mailbox);
|
err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox);
|
||||||
if (err)
|
if (err)
|
||||||
return err;
|
return err;
|
||||||
|
|
||||||
if (!memcmp(mgm->gid, zero_gid, 16)) {
|
if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
|
||||||
if (*index != *hash) {
|
if (*index != *hash) {
|
||||||
mlx4_err(dev, "Found zero MGID in AMGM.\n");
|
mlx4_err(dev, "Found zero MGID in AMGM.\n");
|
||||||
err = -EINVAL;
|
err = -EINVAL;
|
||||||
@ -136,7 +592,7 @@ static int find_mgm(struct mlx4_dev *dev,
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (!memcmp(mgm->gid, gid, 16) &&
|
if (!memcmp(mgm->gid, gid, 16) &&
|
||||||
be32_to_cpu(mgm->members_count) >> 30 == protocol)
|
be32_to_cpu(mgm->members_count) >> 30 == prot)
|
||||||
return err;
|
return err;
|
||||||
|
|
||||||
*prev = *index;
|
*prev = *index;
|
||||||
@@ -147,8 +603,9 @@ static int find_mgm(struct mlx4_dev *dev,
 	return err;
 }
 
-int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
-			  int block_mcast_loopback, enum mlx4_protocol protocol)
+int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+			  int block_mcast_loopback, enum mlx4_protocol prot,
+			  enum mlx4_steer_type steer)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_cmd_mailbox *mailbox;
@@ -159,6 +616,8 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 	int link = 0;
 	int i;
 	int err;
+	u8 port = gid[5];
+	u8 new_entry = 0;
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox))
@@ -166,14 +625,16 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 	mgm = mailbox->buf;
 
 	mutex_lock(&priv->mcg_table.mutex);
-	err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index);
+	err = find_entry(dev, port, gid, prot, steer,
+			 mailbox, &hash, &prev, &index);
 	if (err)
 		goto out;
 
 	if (index != -1) {
-		if (!memcmp(mgm->gid, zero_gid, 16))
+		if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
+			new_entry = 1;
 			memcpy(mgm->gid, gid, 16);
+		}
 	} else {
 		link = 1;
 
@@ -209,26 +670,34 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 	else
 		mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);
 
-	mgm->members_count = cpu_to_be32(members_count | (u32) protocol << 30);
+	mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);
 
-	err = mlx4_WRITE_MCG(dev, index, mailbox);
+	err = mlx4_WRITE_ENTRY(dev, index, mailbox);
 	if (err)
 		goto out;
 
 	if (!link)
 		goto out;
 
-	err = mlx4_READ_MCG(dev, prev, mailbox);
+	err = mlx4_READ_ENTRY(dev, prev, mailbox);
 	if (err)
 		goto out;
 
 	mgm->next_gid_index = cpu_to_be32(index << 6);
 
-	err = mlx4_WRITE_MCG(dev, prev, mailbox);
+	err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
 	if (err)
 		goto out;
 
 out:
+	if (prot == MLX4_PROT_ETH) {
+		/* manage the steering entry for promisc mode */
+		if (new_entry)
+			new_steering_entry(dev, 0, port, steer, index, qp->qpn);
+		else
+			existing_steering_entry(dev, 0, port, steer,
+						index, qp->qpn);
+	}
 	if (err && link && index != -1) {
 		if (index < dev->caps.num_mgms)
 			mlx4_warn(dev, "Got AMGM index %d < %d",
@@ -242,10 +711,9 @@ out:
 	mlx4_free_cmd_mailbox(dev, mailbox);
 	return err;
 }
-EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
 
-int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
-			  enum mlx4_protocol protocol)
+int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+			  enum mlx4_protocol prot, enum mlx4_steer_type steer)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_cmd_mailbox *mailbox;
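
mlx4_qp_attach_common() now takes the steering type explicitly and, for Ethernet, records the (index, qpn) pair with the promiscuous-mode bookkeeping at the end of the function. A hedged usage sketch; the GID encoding (port in byte 5, steering type in byte 7) is read off the wrappers added later in this series, and qpn/port are placeholders:

	/* Sketch: attaching a QP to a multicast group with the new API. */
	struct mlx4_qp qp = { .qpn = qpn };	/* qpn: illustrative */
	u8 gid[16] = {0};
	int err;

	gid[5] = port;				/* physical port     */
	gid[7] = MLX4_MC_STEER << 1;		/* steering type tag */
	err = mlx4_qp_attach_common(dev, &qp, gid, 0 /* block loopback */,
				    MLX4_PROT_ETH, MLX4_MC_STEER);
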
@@ -255,6 +723,8 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 	int prev, index;
 	int i, loc;
 	int err;
+	u8 port = gid[5];
+	bool removed_entry = false;
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox))
@@ -263,7 +733,8 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 
 	mutex_lock(&priv->mcg_table.mutex);
 
-	err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index);
+	err = find_entry(dev, port, gid, prot, steer,
+			 mailbox, &hash, &prev, &index);
 	if (err)
 		goto out;
 
@@ -273,6 +744,11 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 		goto out;
 	}
 
+	/* if this qp is also a promisc qp, it shouldn't be removed */
+	if (prot == MLX4_PROT_ETH &&
+	    check_duplicate_entry(dev, 0, port, steer, index, qp->qpn))
+		goto out;
+
 	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
 	for (loc = -1, i = 0; i < members_count; ++i)
 		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
@@ -285,26 +761,31 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 	}
 
 
-	mgm->members_count = cpu_to_be32(--members_count | (u32) protocol << 30);
+	mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);
 	mgm->qp[loc] = mgm->qp[i - 1];
 	mgm->qp[i - 1] = 0;
 
-	if (i != 1) {
-		err = mlx4_WRITE_MCG(dev, index, mailbox);
+	if (prot == MLX4_PROT_ETH)
+		removed_entry = can_remove_steering_entry(dev, 0, port, steer, index, qp->qpn);
+	if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
+		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
 		goto out;
 	}
 
+	/* We are going to delete the entry, members count should be 0 */
+	mgm->members_count = cpu_to_be32((u32) prot << 30);
+
 	if (prev == -1) {
 		/* Remove entry from MGM */
 		int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
 		if (amgm_index) {
-			err = mlx4_READ_MCG(dev, amgm_index, mailbox);
+			err = mlx4_READ_ENTRY(dev, amgm_index, mailbox);
 			if (err)
 				goto out;
 		} else
 			memset(mgm->gid, 0, 16);
 
-		err = mlx4_WRITE_MCG(dev, index, mailbox);
+		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
 		if (err)
 			goto out;
 
@@ -319,13 +800,13 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 	} else {
 		/* Remove entry from AMGM */
 		int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
-		err = mlx4_READ_MCG(dev, prev, mailbox);
+		err = mlx4_READ_ENTRY(dev, prev, mailbox);
 		if (err)
 			goto out;
 
 		mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);
 
-		err = mlx4_WRITE_MCG(dev, prev, mailbox);
+		err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
 		if (err)
 			goto out;
 
@@ -343,8 +824,85 @@ out:
 	mlx4_free_cmd_mailbox(dev, mailbox);
 	return err;
 }
 
+
+int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+			  int block_mcast_loopback, enum mlx4_protocol prot)
+{
+	enum mlx4_steer_type steer;
+
+	steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
+
+	if (prot == MLX4_PROT_ETH && !dev->caps.vep_mc_steering)
+		return 0;
+
+	if (prot == MLX4_PROT_ETH)
+		gid[7] |= (steer << 1);
+
+	return mlx4_qp_attach_common(dev, qp, gid,
+				     block_mcast_loopback, prot,
+				     steer);
+}
+EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
+
+int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+			  enum mlx4_protocol prot)
+{
+	enum mlx4_steer_type steer;
+
+	steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
+
+	if (prot == MLX4_PROT_ETH && !dev->caps.vep_mc_steering)
+		return 0;
+
+	if (prot == MLX4_PROT_ETH) {
+		gid[7] |= (steer << 1);
+	}
+
+	return mlx4_qp_detach_common(dev, qp, gid, prot, steer);
+}
 EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
+
+
+int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
+{
+	if (!dev->caps.vep_mc_steering)
+		return 0;
+
+	return add_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
+}
+EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
+
+int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
+{
+	if (!dev->caps.vep_mc_steering)
+		return 0;
+
+	return remove_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
+}
+EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
+
+int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
+{
+	if (!dev->caps.vep_mc_steering)
+		return 0;
+
+	return add_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
+}
+EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
+
+int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
+{
+	if (!dev->caps.vep_mc_steering)
+		return 0;
+
+	return remove_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
+}
+EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);
+
 int mlx4_init_mcg_table(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
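
The Ethernet wrappers above encode everything the common path needs into the 16-byte GID itself. A compact sketch of that layout, assuming only what the wrappers and the later mlx4_uc_steer_add() hunk show (the helper name is illustrative):

	/* Sketch: GID layout for Ethernet steering entries.
	 *   byte 5        - physical port
	 *   byte 7, bit 1 - mlx4_steer_type (MC/UC)
	 *   bytes 10..15  - station MAC (unicast entries only)
	 */
	static void eth_steering_gid(u8 gid[16], u8 port, enum mlx4_steer_type steer)
	{
		memset(gid, 0, 16);
		gid[5] = port;
		gid[7] = steer << 1;
	}
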
@@ -105,6 +105,7 @@ struct mlx4_bitmap {
 	u32			max;
 	u32			reserved_top;
 	u32			mask;
+	u32			avail;
 	spinlock_t		lock;
 	unsigned long	       *table;
 };
@@ -162,6 +163,27 @@ struct mlx4_fw {
 	u8			catas_bar;
 };
 
+#define MGM_QPN_MASK       0x00FFFFFF
+#define MGM_BLCK_LB_BIT    30
+
+struct mlx4_promisc_qp {
+	struct list_head list;
+	u32 qpn;
+};
+
+struct mlx4_steer_index {
+	struct list_head list;
+	unsigned int index;
+	struct list_head duplicates;
+};
+
+struct mlx4_mgm {
+	__be32			next_gid_index;
+	__be32			members_count;
+	u32			reserved[2];
+	u8			gid[16];
+	__be32			qp[MLX4_QP_PER_MGM];
+};
 struct mlx4_cmd {
 	struct pci_pool	       *pool;
 	void __iomem	       *hcr;
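
struct mlx4_steer_index ties one MGM table index to the "duplicates" list of promiscuous QPs folded into that entry, with struct mlx4_promisc_qp as the list node. A minimal sketch of the kind of lookup the new steering helpers perform over these lists (the function name is illustrative, not driver API):

	/* Sketch: is qpn already recorded as a duplicate on this entry? */
	static bool entry_has_qpn(struct mlx4_steer_index *entry, u32 qpn)
	{
		struct mlx4_promisc_qp *pqp;

		list_for_each_entry(pqp, &entry->duplicates, list)
			if (pqp->qpn == qpn)
				return true;
		return false;
	}
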
@@ -265,6 +287,10 @@ struct mlx4_vlan_table {
 	int			max;
 };
 
+struct mlx4_mac_entry {
+	u64 mac;
+};
+
 struct mlx4_port_info {
 	struct mlx4_dev	       *dev;
 	int			port;
@@ -272,7 +298,9 @@ struct mlx4_port_info {
 	struct device_attribute port_attr;
 	enum mlx4_port_type	tmp_type;
 	struct mlx4_mac_table	mac_table;
+	struct radix_tree_root	mac_tree;
 	struct mlx4_vlan_table	vlan_table;
+	int			base_qpn;
 };
 
 struct mlx4_sense {
@@ -282,6 +310,17 @@ struct mlx4_sense {
 	struct delayed_work	sense_poll;
 };
 
+struct mlx4_msix_ctl {
+	u64		pool_bm;
+	spinlock_t	pool_lock;
+};
+
+struct mlx4_steer {
+	struct list_head promisc_qps[MLX4_NUM_STEERS];
+	struct list_head steer_entries[MLX4_NUM_STEERS];
+	struct list_head high_prios;
+};
+
 struct mlx4_priv {
 	struct mlx4_dev		dev;
 
@@ -313,6 +352,11 @@ struct mlx4_priv {
 	struct mlx4_port_info	port[MLX4_MAX_PORTS + 1];
 	struct mlx4_sense	sense;
 	struct mutex		port_mutex;
+	struct mlx4_msix_ctl	msix_ctl;
+	struct mlx4_steer	*steer;
+	struct list_head	bf_list;
+	struct mutex		bf_mutex;
+	struct io_mapping	*bf_mapping;
 };
 
 static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
@@ -328,6 +372,7 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
 void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
 u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
 void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt);
+u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap);
 int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
		     u32 reserved_bot, u32 reserved_top);
 void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap);
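
The new avail counter lets callers ask how much of a bitmap is still free without taking the allocation path. The blue flame code in pd.c (later in this series) uses it to hold back MLX4_NUM_RESERVED_UARS UARs for userspace; the guard reduces to:

	/* Sketch: refuse a kernel UAR when it would eat into the pool
	 * reserved for userspace consumers (taken from the pd.c hunk). */
	if (mlx4_bitmap_avail(&priv->uar_table.bitmap) < MLX4_NUM_RESERVED_UARS)
		return -ENOMEM;
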
@@ -403,4 +448,9 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
 int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
 int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps);
 
+int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+			  enum mlx4_protocol prot, enum mlx4_steer_type steer);
+int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+			  int block_mcast_loopback, enum mlx4_protocol prot,
+			  enum mlx4_steer_type steer);
 #endif /* MLX4_H */
@@ -49,8 +49,8 @@
 #include "en_port.h"
 
 #define DRV_NAME	"mlx4_en"
-#define DRV_VERSION	"1.5.1.6"
-#define DRV_RELDATE	"August 2010"
+#define DRV_VERSION	"1.5.4.1"
+#define DRV_RELDATE	"March 2011"
 
 #define MLX4_EN_MSG_LEVEL	(NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
 
@@ -62,6 +62,7 @@
 #define MLX4_EN_PAGE_SHIFT	12
 #define MLX4_EN_PAGE_SIZE	(1 << MLX4_EN_PAGE_SHIFT)
 #define MAX_RX_RINGS		16
+#define MIN_RX_RINGS		4
 #define TXBB_SIZE		64
 #define HEADROOM		(2048 / TXBB_SIZE + 1)
 #define STAMP_STRIDE		64
@@ -124,6 +125,7 @@ enum {
 #define MLX4_EN_RX_SIZE_THRESH		1024
 #define MLX4_EN_RX_RATE_THRESH		(1000000 / MLX4_EN_RX_COAL_TIME_HIGH)
 #define MLX4_EN_SAMPLE_INTERVAL		0
+#define MLX4_EN_AVG_PKT_SMALL		256
 
 #define MLX4_EN_AUTO_CONF	0xffff
 
@@ -214,6 +216,9 @@ struct mlx4_en_tx_desc {
 
 #define MLX4_EN_USE_SRQ		0x01000000
 
+#define MLX4_EN_CX3_LOW_ID	0x1000
+#define MLX4_EN_CX3_HIGH_ID	0x1005
+
 struct mlx4_en_rx_alloc {
 	struct page *page;
 	u16 offset;
@@ -243,6 +248,8 @@ struct mlx4_en_tx_ring {
 	unsigned long bytes;
 	unsigned long packets;
 	spinlock_t comp_lock;
+	struct mlx4_bf bf;
+	bool bf_enabled;
 };
 
 struct mlx4_en_rx_desc {
@@ -453,6 +460,7 @@ struct mlx4_en_priv {
 	struct mlx4_en_rss_map rss_map;
 	u32 flags;
 #define MLX4_EN_FLAG_PROMISC	0x1
+#define MLX4_EN_FLAG_MC_PROMISC	0x2
 	u32 tx_ring_num;
 	u32 rx_ring_num;
 	u32 rx_skb_size;
@@ -461,6 +469,7 @@ struct mlx4_en_priv {
 	u16 log_rx_info;
 
 	struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS];
+	int tx_vector;
 	struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
 	struct mlx4_en_cq tx_cq[MAX_TX_RINGS];
 	struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
@@ -476,6 +485,13 @@ struct mlx4_en_priv {
 	int mc_addrs_cnt;
 	struct mlx4_en_stat_out_mbox hw_stats;
 	int vids[128];
+	bool wol;
+};
+
+enum mlx4_en_wol {
+	MLX4_EN_WOL_MAGIC = (1ULL << 61),
+	MLX4_EN_WOL_ENABLED = (1ULL << 62),
+	MLX4_EN_WOL_DO_MODIFY = (1ULL << 63),
 };
 
 
@@ -486,12 +502,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 int mlx4_en_start_port(struct net_device *dev);
 void mlx4_en_stop_port(struct net_device *dev);
 
-void mlx4_en_free_resources(struct mlx4_en_priv *priv);
+void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors);
 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
 
 int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
		      int entries, int ring, enum cq_type mode);
-void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
+			bool reserve_vectors);
 int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
 void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
 int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
@@ -503,7 +520,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
 
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
-			   u32 size, u16 stride);
+			   int qpn, u32 size, u16 stride);
 void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
 int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring,
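
Each TX ring now carries a blue flame register (bf) and a bf_enabled flag so the send path can pick a doorbell per ring, and the new mlx4_en_wol bits describe wake-on-LAN capability. A small hedged sketch of testing those WOL bits, assuming the device reports them in a u64 capability word as the bit positions suggest:

	/* Sketch only: checking the new WOL capability bits. */
	static bool wol_magic_usable(u64 wol_caps)
	{
		return (wol_caps & MLX4_EN_WOL_ENABLED) &&
		       (wol_caps & MLX4_EN_WOL_MAGIC);
	}
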
@@ -32,12 +32,17 @@
  */
 
 #include <linux/errno.h>
+#include <linux/io-mapping.h>
 
 #include <asm/page.h>
 
 #include "mlx4.h"
 #include "icm.h"
 
+enum {
+	MLX4_NUM_RESERVED_UARS = 8
+};
+
 int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -77,6 +82,7 @@ int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
 		return -ENOMEM;
 
 	uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index;
+	uar->map = NULL;
 
 	return 0;
 }
@@ -88,6 +94,102 @@ void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar)
 }
 EXPORT_SYMBOL_GPL(mlx4_uar_free);
 
+int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_uar *uar;
+	int err = 0;
+	int idx;
+
+	if (!priv->bf_mapping)
+		return -ENOMEM;
+
+	mutex_lock(&priv->bf_mutex);
+	if (!list_empty(&priv->bf_list))
+		uar = list_entry(priv->bf_list.next, struct mlx4_uar, bf_list);
+	else {
+		if (mlx4_bitmap_avail(&priv->uar_table.bitmap) < MLX4_NUM_RESERVED_UARS) {
+			err = -ENOMEM;
+			goto out;
+		}
+		uar = kmalloc(sizeof *uar, GFP_KERNEL);
+		if (!uar) {
+			err = -ENOMEM;
+			goto out;
+		}
+		err = mlx4_uar_alloc(dev, uar);
+		if (err)
+			goto free_kmalloc;
+
+		uar->map = ioremap(uar->pfn << PAGE_SHIFT, PAGE_SIZE);
+		if (!uar->map) {
+			err = -ENOMEM;
+			goto free_uar;
+		}
+
+		uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT);
+		if (!uar->bf_map) {
+			err = -ENOMEM;
+			goto unmap_uar;
+		}
+		uar->free_bf_bmap = 0;
+		list_add(&uar->bf_list, &priv->bf_list);
+	}
+
+	idx = ffz(uar->free_bf_bmap);
+	uar->free_bf_bmap |= 1 << idx;
+	bf->uar = uar;
+	bf->offset = 0;
+	bf->buf_size = dev->caps.bf_reg_size / 2;
+	bf->reg = uar->bf_map + idx * dev->caps.bf_reg_size;
+	if (uar->free_bf_bmap == (1 << dev->caps.bf_regs_per_page) - 1)
+		list_del_init(&uar->bf_list);
+
+	goto out;
+
+unmap_uar:
+	bf->uar = NULL;
+	iounmap(uar->map);
+
+free_uar:
+	mlx4_uar_free(dev, uar);
+
+free_kmalloc:
+	kfree(uar);
+
+out:
+	mutex_unlock(&priv->bf_mutex);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_bf_alloc);
+
+void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int idx;
+
+	if (!bf->uar || !bf->uar->bf_map)
+		return;
+
+	mutex_lock(&priv->bf_mutex);
+	idx = (bf->reg - bf->uar->bf_map) / dev->caps.bf_reg_size;
+	bf->uar->free_bf_bmap &= ~(1 << idx);
+	if (!bf->uar->free_bf_bmap) {
+		if (!list_empty(&bf->uar->bf_list))
+			list_del(&bf->uar->bf_list);
+
+		io_mapping_unmap(bf->uar->bf_map);
+		iounmap(bf->uar->map);
+		mlx4_uar_free(dev, bf->uar);
+		kfree(bf->uar);
+	} else if (list_empty(&bf->uar->bf_list))
+		list_add(&bf->uar->bf_list, &priv->bf_list);
+
+	mutex_unlock(&priv->bf_mutex);
+}
+EXPORT_SYMBOL_GPL(mlx4_bf_free);
+
 int mlx4_init_uar_table(struct mlx4_dev *dev)
 {
 	if (dev->caps.num_uars <= 128) {
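
Geometry behind mlx4_bf_alloc(): a UAR page is carved into dev->caps.bf_regs_per_page blue flame registers of bf_reg_size bytes each, tracked per page by the free_bf_bmap bitmap; buf_size is half a register, leaving room to alternate writes between the two halves (the toggle itself lives in the TX path, not shown here). A worked sketch with illustrative ConnectX-style numbers (512-byte registers - an assumption, the real code reads dev->caps):

	/* Sketch: locating the idx-th BF register inside one UAR page. */
	int idx = ffz(uar->free_bf_bmap);		/* first free slot   */
	bf->reg      = uar->bf_map + idx * 512;		/* slot base         */
	bf->buf_size = 512 / 2;				/* usable half       */
	bf->offset   = 0;				/* toggled by sender */
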
@@ -90,12 +90,79 @@ static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
 	return err;
 }
 
-int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
+static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
+			     u64 mac, int *qpn, u8 reserve)
 {
-	struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table;
+	struct mlx4_qp qp;
+	u8 gid[16] = {0};
+	int err;
+
+	if (reserve) {
+		err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
+		if (err) {
+			mlx4_err(dev, "Failed to reserve qp for mac registration\n");
+			return err;
+		}
+	}
+	qp.qpn = *qpn;
+
+	mac &= 0xffffffffffffULL;
+	mac = cpu_to_be64(mac << 16);
+	memcpy(&gid[10], &mac, ETH_ALEN);
+	gid[5] = port;
+	gid[7] = MLX4_UC_STEER << 1;
+
+	err = mlx4_qp_attach_common(dev, &qp, gid, 0,
+				    MLX4_PROT_ETH, MLX4_UC_STEER);
+	if (err && reserve)
+		mlx4_qp_release_range(dev, *qpn, 1);
+
+	return err;
+}
+
+static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
+				  u64 mac, int qpn, u8 free)
+{
+	struct mlx4_qp qp;
+	u8 gid[16] = {0};
+
+	qp.qpn = qpn;
+	mac &= 0xffffffffffffULL;
+	mac = cpu_to_be64(mac << 16);
+	memcpy(&gid[10], &mac, ETH_ALEN);
+	gid[5] = port;
+	gid[7] = MLX4_UC_STEER << 1;
+
+	mlx4_qp_detach_common(dev, &qp, gid, MLX4_PROT_ETH, MLX4_UC_STEER);
+	if (free)
+		mlx4_qp_release_range(dev, qpn, 1);
+}
+
+int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
+{
+	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+	struct mlx4_mac_table *table = &info->mac_table;
+	struct mlx4_mac_entry *entry;
 	int i, err = 0;
 	int free = -1;
 
+	if (dev->caps.vep_uc_steering) {
+		err = mlx4_uc_steer_add(dev, port, mac, qpn, 1);
+		if (!err) {
+			entry = kmalloc(sizeof *entry, GFP_KERNEL);
+			if (!entry) {
+				mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
+				return -ENOMEM;
+			}
+			entry->mac = mac;
+			err = radix_tree_insert(&info->mac_tree, *qpn, entry);
+			if (err) {
+				mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
+				return err;
+			}
+		} else
+			return err;
+	}
 	mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
 	mutex_lock(&table->mutex);
 	for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
@@ -106,7 +173,6 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
 
 		if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
 			/* MAC already registered, increase reference count */
-			*index = i;
 			++table->refs[i];
 			goto out;
 		}
@@ -137,7 +203,8 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
 		goto out;
 	}
 
-	*index = free;
+	if (!dev->caps.vep_uc_steering)
+		*qpn = info->base_qpn + free;
 	++table->total;
 out:
 	mutex_unlock(&table->mutex);
@@ -145,20 +212,52 @@ out:
 }
 EXPORT_SYMBOL_GPL(mlx4_register_mac);
 
-void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index)
+static int validate_index(struct mlx4_dev *dev,
+			  struct mlx4_mac_table *table, int index)
 {
-	struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table;
+	int err = 0;
+
+	if (index < 0 || index >= table->max || !table->entries[index]) {
+		mlx4_warn(dev, "No valid Mac entry for the given index\n");
+		err = -EINVAL;
+	}
+	return err;
+}
+
+static int find_index(struct mlx4_dev *dev,
+		      struct mlx4_mac_table *table, u64 mac)
+{
+	int i;
+	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+		if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
+			return i;
+	}
+	/* Mac not found */
+	return -EINVAL;
+}
+
+void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn)
+{
+	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+	struct mlx4_mac_table *table = &info->mac_table;
+	int index = qpn - info->base_qpn;
+	struct mlx4_mac_entry *entry;
+
+	if (dev->caps.vep_uc_steering) {
+		entry = radix_tree_lookup(&info->mac_tree, qpn);
+		if (entry) {
+			mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1);
+			radix_tree_delete(&info->mac_tree, qpn);
+			index = find_index(dev, table, entry->mac);
+			kfree(entry);
+		}
+	}
+
 	mutex_lock(&table->mutex);
-	if (!table->refs[index]) {
-		mlx4_warn(dev, "No MAC entry for index %d\n", index);
+	if (validate_index(dev, table, index))
 		goto out;
-	}
-	if (--table->refs[index]) {
-		mlx4_warn(dev, "Have more references for index %d,"
-			  "no need to modify MAC table\n", index);
-		goto out;
-	}
 	table->entries[index] = 0;
 	mlx4_set_port_mac_table(dev, port, table->entries);
 	--table->total;
@@ -167,6 +266,44 @@ out:
 }
 EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
 
+int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap)
+{
+	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+	struct mlx4_mac_table *table = &info->mac_table;
+	int index = qpn - info->base_qpn;
+	struct mlx4_mac_entry *entry;
+	int err;
+
+	if (dev->caps.vep_uc_steering) {
+		entry = radix_tree_lookup(&info->mac_tree, qpn);
+		if (!entry)
+			return -EINVAL;
+		index = find_index(dev, table, entry->mac);
+		mlx4_uc_steer_release(dev, port, entry->mac, qpn, 0);
+		entry->mac = new_mac;
+		err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn, 0);
+		if (err || index < 0)
+			return err;
+	}
+
+	mutex_lock(&table->mutex);
+
+	err = validate_index(dev, table, index);
+	if (err)
+		goto out;
+
+	table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);
+
+	err = mlx4_set_port_mac_table(dev, port, table->entries);
+	if (unlikely(err)) {
+		mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) new_mac);
+		table->entries[index] = 0;
+	}
+out:
+	mutex_unlock(&table->mutex);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_replace_mac);
 static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
				    __be32 *entries)
 {
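
mlx4_uc_steer_add() derives the unicast steering GID from the MAC: mask to 48 bits, shift into the top bytes of a big-endian u64, copy into gid[10..15]. An equivalent standalone sketch with the byte order spelled out (cpu_to_be64 replaced by an explicit loop; the helper name is illustrative):

	/* Sketch: MAC -> gid[10..15], most significant byte first. */
	static void mac_to_gid(u8 gid[16], u64 mac)
	{
		int i;

		for (i = 0; i < 6; i++)
			gid[10 + i] = (mac >> (40 - 8 * i)) & 0xff;
	}
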
@@ -107,9 +107,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
 	profile[MLX4_RES_AUXC].num    = request->num_qp;
 	profile[MLX4_RES_SRQ].num     = request->num_srq;
 	profile[MLX4_RES_CQ].num      = request->num_cq;
-	profile[MLX4_RES_EQ].num      = min_t(unsigned, dev_cap->max_eqs,
-					      dev_cap->reserved_eqs +
-					      num_possible_cpus() + 1);
+	profile[MLX4_RES_EQ].num      = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
 	profile[MLX4_RES_DMPT].num    = request->num_mpt;
 	profile[MLX4_RES_CMPT].num    = MLX4_NUM_CMPTS;
 	profile[MLX4_RES_MTT].num     = request->num_mtt;
@@ -3645,6 +3645,7 @@ static void myri10ge_free_slices(struct myri10ge_priv *mgp)
			dma_free_coherent(&pdev->dev, bytes,
					  ss->fw_stats, ss->fw_stats_bus);
			ss->fw_stats = NULL;
+			netif_napi_del(&ss->napi);
		}
	}
	kfree(mgp->ss);
@@ -2441,7 +2441,7 @@ static struct pci_error_handlers pch_gbe_err_handler = {
	.resume = pch_gbe_io_resume
 };
 
-static struct pci_driver pch_gbe_pcidev = {
+static struct pci_driver pch_gbe_driver = {
	.name = KBUILD_MODNAME,
	.id_table = pch_gbe_pcidev_id,
	.probe = pch_gbe_probe,
@@ -2458,7 +2458,7 @@ static int __init pch_gbe_init_module(void)
 {
	int ret;
 
-	ret = pci_register_driver(&pch_gbe_pcidev);
+	ret = pci_register_driver(&pch_gbe_driver);
	if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) {
		if (copybreak == 0) {
			pr_info("copybreak disabled\n");
@@ -2472,7 +2472,7 @@ static int __init pch_gbe_init_module(void)
 
 static void __exit pch_gbe_exit_module(void)
 {
-	pci_unregister_driver(&pch_gbe_pcidev);
+	pci_unregister_driver(&pch_gbe_driver);
 }
 
 module_init(pch_gbe_init_module);
@@ -1054,6 +1054,7 @@ static int efx_init_io(struct efx_nic *efx)
 {
	struct pci_dev *pci_dev = efx->pci_dev;
	dma_addr_t dma_mask = efx->type->max_dma_mask;
+	bool use_wc;
	int rc;
 
	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
@@ -1104,8 +1105,21 @@ static int efx_init_io(struct efx_nic *efx)
		rc = -EIO;
		goto fail3;
	}
-	efx->membase = ioremap_wc(efx->membase_phys,
-				  efx->type->mem_map_size);
+
+	/* bug22643: If SR-IOV is enabled then tx push over a write combined
+	 * mapping is unsafe. We need to disable write combining in this case.
+	 * MSI is unsupported when SR-IOV is enabled, and the firmware will
+	 * have removed the MSI capability. So write combining is safe if
+	 * there is an MSI capability.
+	 */
+	use_wc = (!EFX_WORKAROUND_22643(efx) ||
+		  pci_find_capability(pci_dev, PCI_CAP_ID_MSI));
+	if (use_wc)
+		efx->membase = ioremap_wc(efx->membase_phys,
+					  efx->type->mem_map_size);
+	else
+		efx->membase = ioremap_nocache(efx->membase_phys,
+					       efx->type->mem_map_size);
	if (!efx->membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not map memory BAR at %llx+%x\n",
@@ -38,6 +38,8 @@
 #define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
 /* Legacy interrupt storm when interrupt fifo fills */
 #define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
+/* Write combining and sriov=enabled are incompatible */
+#define EFX_WORKAROUND_22643 EFX_WORKAROUND_SIENA
 
 /* Spurious parity errors in TSORT buffers */
 #define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
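
The sfc change gates the BAR mapping type on whether write combining is safe: with the SR-IOV workaround active and no MSI capability left (the firmware strips MSI when SR-IOV is on), the driver must fall back to an uncached mapping. The decision condenses to (a restatement of the hunk above):

	/* Sketch: a surviving MSI capability proves SR-IOV is off,
	 * so a write-combined mapping is safe. */
	use_wc = !EFX_WORKAROUND_22643(efx) ||
		 pci_find_capability(pci_dev, PCI_CAP_ID_MSI);
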
@@ -49,6 +49,8 @@
 
 struct smsc95xx_priv {
	u32 mac_cr;
+	u32 hash_hi;
+	u32 hash_lo;
	spinlock_t mac_cr_lock;
	bool use_tx_csum;
	bool use_rx_csum;
@@ -370,10 +372,11 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
 {
	struct usbnet *dev = netdev_priv(netdev);
	struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
-	u32 hash_hi = 0;
-	u32 hash_lo = 0;
	unsigned long flags;
 
+	pdata->hash_hi = 0;
+	pdata->hash_lo = 0;
+
	spin_lock_irqsave(&pdata->mac_cr_lock, flags);
 
	if (dev->net->flags & IFF_PROMISC) {
@@ -394,13 +397,13 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
			u32 bitnum = smsc95xx_hash(ha->addr);
			u32 mask = 0x01 << (bitnum & 0x1F);
			if (bitnum & 0x20)
-				hash_hi |= mask;
+				pdata->hash_hi |= mask;
			else
-				hash_lo |= mask;
+				pdata->hash_lo |= mask;
		}
 
		netif_dbg(dev, drv, dev->net, "HASHH=0x%08X, HASHL=0x%08X\n",
-			  hash_hi, hash_lo);
+			  pdata->hash_hi, pdata->hash_lo);
	} else {
		netif_dbg(dev, drv, dev->net, "receive own packets only\n");
		pdata->mac_cr &=
@@ -410,8 +413,8 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
	spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
 
	/* Initiate async writes, as we can't wait for completion here */
-	smsc95xx_write_reg_async(dev, HASHH, &hash_hi);
-	smsc95xx_write_reg_async(dev, HASHL, &hash_lo);
+	smsc95xx_write_reg_async(dev, HASHH, &pdata->hash_hi);
+	smsc95xx_write_reg_async(dev, HASHL, &pdata->hash_lo);
	smsc95xx_write_reg_async(dev, MAC_CR, &pdata->mac_cr);
 }
 
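
The smsc95xx hunks fix a lifetime bug: smsc95xx_write_reg_async() only captures the buffer pointer, and the USB completion reads it after smsc95xx_set_multicast() has returned, so the on-stack hash words became dangling buffers. Moving them into smsc95xx_priv keeps them alive for the URB. The broken shape, as an illustrative sketch:

	/* Sketch: why the stack buffer was unsafe. */
	static void broken(struct usbnet *dev)
	{
		u32 val = 0;			/* dies when broken() returns */
		smsc95xx_write_reg_async(dev, HASHH, &val);	/* read later! */
	}
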
@@ -2160,6 +2160,8 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
		if (!ath_drain_all_txq(sc, false))
			ath_reset(sc, false);
 
+	ieee80211_wake_queues(hw);
+
 out:
	ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
	mutex_unlock(&sc->mutex);
@@ -1328,7 +1328,7 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
 
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
-	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+	for (i = 0; i < sc->hw->max_rates; i++) {
		struct ieee80211_tx_rate *rate = &tx_info->status.rates[i];
		if (!rate->count)
			break;
@@ -1725,8 +1725,8 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
	u8 tidno;
 
	spin_lock_bh(&txctl->txq->axq_lock);
-
-	if (ieee80211_is_data_qos(hdr->frame_control) && txctl->an) {
+	if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
+	    ieee80211_is_data_qos(hdr->frame_control)) {
		tidno = ieee80211_get_qos_ctl(hdr)[0] &
			IEEE80211_QOS_CTL_TID_MASK;
		tid = ATH_AN_2_TID(txctl->an, tidno);
@@ -2265,7 +2265,7 @@ signed long iwlagn_wait_notification(struct iwl_priv *priv,
	int ret;
 
	ret = wait_event_timeout(priv->_agn.notif_waitq,
-				 &wait_entry->triggered,
+				 wait_entry->triggered,
				 timeout);
 
	spin_lock_bh(&priv->_agn.notif_wait_lock);
@@ -3009,14 +3009,17 @@ static int iwl_mac_offchannel_tx_cancel_wait(struct ieee80211_hw *hw)
 
	mutex_lock(&priv->mutex);
 
-	if (!priv->_agn.offchan_tx_skb)
-		return -EINVAL;
+	if (!priv->_agn.offchan_tx_skb) {
+		ret = -EINVAL;
+		goto unlock;
+	}
 
	priv->_agn.offchan_tx_skb = NULL;
 
	ret = iwl_scan_cancel_timeout(priv, 200);
	if (ret)
		ret = -EIO;
+unlock:
	mutex_unlock(&priv->mutex);
 
	return ret;
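
Two distinct iwlwifi fixes above. First, wait_event_timeout() was handed &wait_entry->triggered - the address of the flag, which is always non-zero - so the wait condition was immediately true and the call never slept; dropping the & makes it wait on the flag's value. Second, the off-channel cancel path used to return -EINVAL with priv->mutex still held; the new unlock label routes the error through mutex_unlock(). In miniature:

	/* Sketch: address vs. value as a wait condition. */
	bool triggered = false;
	/* wait_event_timeout(wq, &triggered, t) - condition is a non-NULL
	 *   pointer, always true: returns at once.
	 * wait_event_timeout(wq, triggered, t)  - condition is the flag:
	 *   actually sleeps until it is set or the timeout expires. */
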
@@ -153,6 +153,9 @@ static int orinoco_scan(struct wiphy *wiphy, struct net_device *dev,
	priv->scan_request = request;
 
	err = orinoco_hw_trigger_scan(priv, request->ssids);
+	/* On error we aren't processing the request */
+	if (err)
+		priv->scan_request = NULL;
 
	return err;
 }
@@ -1376,13 +1376,13 @@ static void orinoco_process_scan_results(struct work_struct *work)
 
	spin_lock_irqsave(&priv->scan_lock, flags);
	list_for_each_entry_safe(sd, temp, &priv->scan_list, list) {
-		spin_unlock_irqrestore(&priv->scan_lock, flags);
 
		buf = sd->buf;
		len = sd->len;
		type = sd->type;
 
		list_del(&sd->list);
+		spin_unlock_irqrestore(&priv->scan_lock, flags);
		kfree(sd);
 
		if (len > 0) {
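
The scan-results fix is about lock scope: the old loop dropped scan_lock before reading and unlinking the entry, letting a concurrent writer touch the list mid-iteration; now the fields are copied and list_del() runs under the lock, which is only then released for the kfree() and processing. The general pop-then-process shape this restores (all names illustrative):

	spin_lock_irqsave(&lock, flags);
	while (!list_empty(&queue)) {
		sd = list_first_entry(&queue, struct scan_data, list);
		list_del(&sd->list);			/* unlink under lock */
		spin_unlock_irqrestore(&lock, flags);	/* then process      */
		process(sd);
		kfree(sd);
		spin_lock_irqsave(&lock, flags);
	}
	spin_unlock_irqrestore(&lock, flags);
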
@@ -719,6 +719,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
	{ USB_DEVICE(0x0b05, 0x1732), USB_DEVICE_DATA(&rt2800usb_ops) },
	{ USB_DEVICE(0x0b05, 0x1742), USB_DEVICE_DATA(&rt2800usb_ops) },
	{ USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) },
+	{ USB_DEVICE(0x1761, 0x0b05), USB_DEVICE_DATA(&rt2800usb_ops) },
	/* AzureWave */
	{ USB_DEVICE(0x13d3, 0x3247), USB_DEVICE_DATA(&rt2800usb_ops) },
	{ USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -913,7 +914,6 @@ static struct usb_device_id rt2800usb_device_table[] = {
	{ USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) },
	{ USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) },
	{ USB_DEVICE(0x0b05, 0x1790), USB_DEVICE_DATA(&rt2800usb_ops) },
-	{ USB_DEVICE(0x1761, 0x0b05), USB_DEVICE_DATA(&rt2800usb_ops) },
	/* AzureWave */
	{ USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) },
	{ USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -937,6 +937,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
	{ USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) },
	{ USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) },
	{ USB_DEVICE(0x07d1, 0x3c17), USB_DEVICE_DATA(&rt2800usb_ops) },
+	/* Edimax */
+	{ USB_DEVICE(0x7392, 0x4085), USB_DEVICE_DATA(&rt2800usb_ops) },
	/* Encore */
	{ USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) },
	/* Gemtek */
@@ -961,6 +963,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
	{ USB_DEVICE(0x1d4d, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) },
	{ USB_DEVICE(0x1d4d, 0x0011), USB_DEVICE_DATA(&rt2800usb_ops) },
	/* Planex */
+	{ USB_DEVICE(0x2019, 0x5201), USB_DEVICE_DATA(&rt2800usb_ops) },
	{ USB_DEVICE(0x2019, 0xab24), USB_DEVICE_DATA(&rt2800usb_ops) },
	/* Qcom */
	{ USB_DEVICE(0x18e8, 0x6259), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -972,6 +975,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
	/* Sweex */
	{ USB_DEVICE(0x177f, 0x0153), USB_DEVICE_DATA(&rt2800usb_ops) },
	{ USB_DEVICE(0x177f, 0x0313), USB_DEVICE_DATA(&rt2800usb_ops) },
+	/* Toshiba */
+	{ USB_DEVICE(0x0930, 0x0a07), USB_DEVICE_DATA(&rt2800usb_ops) },
	/* Zyxel */
	{ USB_DEVICE(0x0586, 0x341a), USB_DEVICE_DATA(&rt2800usb_ops) },
 #endif
@@ -410,8 +410,8 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
 
	if (!efuse_shadow_update_chk(hw)) {
		efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
-		memcpy((void *)&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
-		       (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+		memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
+		       &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
		       rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
 
		RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
@@ -446,9 +446,9 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
 
			if (word_en != 0x0F) {
				u8 tmpdata[8];
-				memcpy((void *)tmpdata,
-				       (void *)(&rtlefuse->
-						efuse_map[EFUSE_MODIFY_MAP][base]), 8);
+				memcpy(tmpdata,
+				       &rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base],
+				       8);
				RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD,
					      ("U-efuse\n"), tmpdata, 8);
 
@@ -465,8 +465,8 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
	efuse_power_switch(hw, true, false);
	efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
 
-	memcpy((void *)&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
-	       (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+	memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
+	       &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
	       rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
 
	RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, ("<---\n"));
@@ -479,13 +479,12 @@ void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw)
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
 
	if (rtlefuse->autoload_failflag == true) {
-		memset((void *)(&rtlefuse->efuse_map[EFUSE_INIT_MAP][0]), 128,
-		       0xFF);
+		memset(&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], 0xFF, 128);
	} else
		efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
 
-	memcpy((void *)&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
-	       (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+	memcpy(&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
+	       &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
	       rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
 
 }
@@ -694,8 +693,8 @@ static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data)
	if (offset > 15)
		return false;
 
-	memset((void *)data, PGPKT_DATA_SIZE * sizeof(u8), 0xff);
-	memset((void *)tmpdata, PGPKT_DATA_SIZE * sizeof(u8), 0xff);
+	memset(data, 0xff, PGPKT_DATA_SIZE * sizeof(u8));
+	memset(tmpdata, 0xff, PGPKT_DATA_SIZE * sizeof(u8));
 
	while (bcontinual && (efuse_addr < EFUSE_MAX_SIZE)) {
		if (readstate & PG_STATE_HEADER) {
@@ -862,7 +861,7 @@ static void efuse_write_data_case2(struct ieee80211_hw *hw, u16 *efuse_addr,
 
	tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en);
 
-	memset((void *)originaldata, 8 * sizeof(u8), 0xff);
+	memset(originaldata, 0xff, 8 * sizeof(u8));
 
	if (efuse_pg_packet_read(hw, tmp_pkt.offset, originaldata)) {
		badworden = efuse_word_enable_data_write(hw,
@@ -917,7 +916,7 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
	target_pkt.offset = offset;
	target_pkt.word_en = word_en;
 
-	memset((void *)target_pkt.data, 8 * sizeof(u8), 0xFF);
+	memset(target_pkt.data, 0xFF, 8 * sizeof(u8));
 
	efuse_word_enable_data_read(word_en, data, target_pkt.data);
	target_word_cnts = efuse_calculate_word_cnts(target_pkt.word_en);
@@ -1022,7 +1021,7 @@ static u8 efuse_word_enable_data_write(struct ieee80211_hw *hw,
	u8 badworden = 0x0F;
	u8 tmpdata[8];
 
-	memset((void *)tmpdata, PGPKT_DATA_SIZE, 0xff);
+	memset(tmpdata, 0xff, PGPKT_DATA_SIZE);
	RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
		 ("word_en = %x efuse_addr=%x\n", word_en, efuse_addr));
 
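
All the rtlwifi memset changes correct one transposed-argument bug: memset(ptr, size, value) fills `value` bytes with the low byte of `size`, rather than `size` bytes with `value`. Concretely:

	u8 buf[8];

	memset(buf, 8, 0xff);	/* wrong: writes 0x08 into 255 bytes - overflow */
	memset(buf, 0xff, 8);	/* right: 8 bytes of 0xff                       */
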
||||||
@@ -60,6 +60,7 @@ static struct usb_device_id usb_ids[] = {
 	{ USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 },
 	{ USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 },
 	{ USB_DEVICE(0x157e, 0x3204), .driver_info = DEVICE_ZD1211 },
+	{ USB_DEVICE(0x157e, 0x3207), .driver_info = DEVICE_ZD1211 },
 	{ USB_DEVICE(0x1740, 0x2000), .driver_info = DEVICE_ZD1211 },
 	{ USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 },
 	/* ZD1211B */
@@ -36,6 +36,7 @@ struct emac_platform_data {
 
 	u8 rmii_en;
 	u8 version;
+	bool no_bd_ram;
 	void (*interrupt_enable) (void);
 	void (*interrupt_disable) (void);
 };
@@ -648,6 +648,9 @@ enum ethtool_sfeatures_retval_bits {
 
 #include <linux/rculist.h>
 
+/* needed by dev_disable_lro() */
+extern int __ethtool_set_flags(struct net_device *dev, u32 flags);
+
 struct ethtool_rx_ntuple_flow_spec_container {
 	struct ethtool_rx_ntuple_flow_spec fs;
 	struct list_head list;
@@ -39,6 +39,11 @@
 
 #include <asm/atomic.h>
 
+#define MAX_MSIX_P_PORT		17
+#define MAX_MSIX		64
+#define MSIX_LEGACY_SZ		4
+#define MIN_MSIX_P_PORT		5
+
 enum {
 	MLX4_FLAG_MSI_X		= 1 << 0,
 	MLX4_FLAG_OLD_PORT_CMDS	= 1 << 1,
@@ -145,8 +150,10 @@ enum {
 };
 
 enum mlx4_protocol {
-	MLX4_PROTOCOL_IB,
-	MLX4_PROTOCOL_EN,
+	MLX4_PROT_IB_IPV6 = 0,
+	MLX4_PROT_ETH,
+	MLX4_PROT_IB_IPV4,
+	MLX4_PROT_FCOE
 };
 
 enum {
@@ -173,6 +180,12 @@ enum mlx4_special_vlan_idx {
 	MLX4_VLAN_REGULAR
 };
 
+enum mlx4_steer_type {
+	MLX4_MC_STEER = 0,
+	MLX4_UC_STEER,
+	MLX4_NUM_STEERS
+};
+
 enum {
 	MLX4_NUM_FEXCH = 64 * 1024,
 };
@@ -223,6 +236,7 @@ struct mlx4_caps {
 	int num_eqs;
 	int reserved_eqs;
 	int num_comp_vectors;
+	int comp_pool;
 	int num_mpts;
 	int num_mtt_segs;
 	int mtts_per_seg;
@@ -245,6 +259,9 @@ struct mlx4_caps {
 	u16 stat_rate_support;
 	int udp_rss;
 	int loopback_support;
+	int vep_uc_steering;
+	int vep_mc_steering;
+	int wol;
 	u8 port_width_cap[MLX4_MAX_PORTS + 1];
 	int max_gso_sz;
 	int reserved_qps_cnt[MLX4_NUM_QP_REGION];
@@ -334,6 +351,17 @@ struct mlx4_fmr {
 struct mlx4_uar {
 	unsigned long pfn;
 	int index;
+	struct list_head bf_list;
+	unsigned free_bf_bmap;
+	void __iomem *map;
+	void __iomem *bf_map;
+};
+
+struct mlx4_bf {
+	unsigned long offset;
+	int buf_size;
+	struct mlx4_uar *uar;
+	void __iomem *reg;
 };
 
 struct mlx4_cq {
@@ -415,7 +443,7 @@ struct mlx4_dev {
 	unsigned long flags;
 	struct mlx4_caps caps;
 	struct radix_tree_root qp_table_tree;
-	u32 rev_id;
+	u8 rev_id;
 	char board_id[MLX4_BOARD_ID_LEN];
 };
 
@@ -461,6 +489,8 @@ void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn);
 
 int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar);
 void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar);
+int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf);
+void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf);
 
 int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
 		  struct mlx4_mtt *mtt);
@@ -508,9 +538,15 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 			  int block_mcast_loopback, enum mlx4_protocol protocol);
 int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 			  enum mlx4_protocol protocol);
+int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
+int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
+int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
+int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
+int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
 
-int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index);
-void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index);
+int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap);
+void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn);
+int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap);
 
 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
@@ -526,5 +562,10 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
 int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
 int mlx4_SYNC_TPT(struct mlx4_dev *dev);
 int mlx4_test_interrupts(struct mlx4_dev *dev);
+int mlx4_assign_eq(struct mlx4_dev *dev, char* name , int* vector);
+void mlx4_release_eq(struct mlx4_dev *dev, int vec);
+
+int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
+int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
 
 #endif /* MLX4_DEVICE_H */
@@ -303,6 +303,7 @@ struct mlx4_wqe_data_seg {
 
 enum {
 	MLX4_INLINE_ALIGN = 64,
+	MLX4_INLINE_SEG = 1 << 31,
 };
 
 struct mlx4_wqe_inline_seg {
@@ -486,7 +486,8 @@ struct rate_info {
  * @plink_state: mesh peer link state
  * @signal: signal strength of last received packet in dBm
  * @signal_avg: signal strength average in dBm
- * @txrate: current unicast bitrate to this station
+ * @txrate: current unicast bitrate from this station
+ * @rxrate: current unicast bitrate to this station
  * @rx_packets: packets received from this station
  * @tx_packets: packets transmitted to this station
  * @tx_retries: cumulative retry counts
@@ -70,7 +70,7 @@ static inline struct inet_peer *rt6_get_peer(struct rt6_info *rt)
 extern void ip6_route_input(struct sk_buff *skb);
 
 extern struct dst_entry * ip6_route_output(struct net *net,
-					   struct sock *sk,
+					   const struct sock *sk,
 					   struct flowi6 *fl6);
 
 extern int ip6_route_init(void);
@@ -51,7 +51,6 @@ struct fib_nh {
 	struct fib_info *nh_parent;
 	unsigned nh_flags;
 	unsigned char nh_scope;
-	unsigned char nh_cfg_scope;
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 	int nh_weight;
 	int nh_power;
@@ -62,6 +61,7 @@ struct fib_nh {
 	int nh_oif;
 	__be32 nh_gw;
 	__be32 nh_saddr;
+	int nh_saddr_genid;
 };
 
 /*
@@ -74,9 +74,10 @@ struct fib_info {
 	struct net *fib_net;
 	int fib_treeref;
 	atomic_t fib_clntref;
-	int fib_dead;
 	unsigned fib_flags;
-	int fib_protocol;
+	unsigned char fib_dead;
+	unsigned char fib_protocol;
+	unsigned char fib_scope;
 	__be32 fib_prefsrc;
 	u32 fib_priority;
 	u32 *fib_metrics;
@@ -141,12 +142,19 @@ struct fib_result_nl {
 
 #endif /* CONFIG_IP_ROUTE_MULTIPATH */
 
-#define FIB_RES_SADDR(res)		(FIB_RES_NH(res).nh_saddr)
+extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
+
+#define FIB_RES_SADDR(net, res)				\
+	((FIB_RES_NH(res).nh_saddr_genid ==		\
+	  atomic_read(&(net)->ipv4.dev_addr_genid)) ?	\
+	 FIB_RES_NH(res).nh_saddr :			\
+	 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
 #define FIB_RES_GW(res)			(FIB_RES_NH(res).nh_gw)
 #define FIB_RES_DEV(res)		(FIB_RES_NH(res).nh_dev)
 #define FIB_RES_OIF(res)		(FIB_RES_NH(res).nh_oif)
 
-#define FIB_RES_PREFSRC(res)		((res).fi->fib_prefsrc ? : FIB_RES_SADDR(res))
+#define FIB_RES_PREFSRC(net, res)	((res).fi->fib_prefsrc ? : \
+					 FIB_RES_SADDR(net, res))
 
 struct fib_table {
 	struct hlist_node tb_hlist;
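The FIB_RES_SADDR rework above caches the nexthop source address and tags it with the per-netns dev_addr_genid counter, so the address is recomputed only after an address change bumps the generation. A minimal userspace sketch of the same invalidation pattern (all names hypothetical):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int addr_genid;	/* bumped on any address change */

	struct cached_saddr {
		int genid;	/* generation the value was computed under */
		int saddr;	/* cached result */
	};

	static int recompute_saddr(void)
	{
		return 42;	/* stand-in for inet_select_addr() */
	}

	static int get_saddr(struct cached_saddr *c)
	{
		int now = atomic_load(&addr_genid);

		if (c->genid != now) {	/* stale: recompute and retag */
			c->saddr = recompute_saddr();
			c->genid = now;
		}
		return c->saddr;
	}

	int main(void)
	{
		struct cached_saddr c = { .genid = -1 };

		printf("%d\n", get_saddr(&c));		/* recomputes */
		printf("%d\n", get_saddr(&c));		/* cached */
		atomic_fetch_add(&addr_genid, 1);	/* an address changed */
		printf("%d\n", get_saddr(&c));		/* recomputes again */
		return 0;
	}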
@@ -1160,7 +1160,7 @@ enum ieee80211_hw_flags {
 * @napi_weight: weight used for NAPI polling. You must specify an
 *	appropriate value here if a napi_poll operation is provided
 *	by your driver.
+ *
 * @max_rx_aggregation_subframes: maximum buffer size (number of
 *	sub-frames) to be used for A-MPDU block ack receiver
 *	aggregation.
@@ -55,6 +55,7 @@ struct netns_ipv4 {
 	int current_rt_cache_rebuild_count;
 
 	atomic_t rt_genid;
+	atomic_t dev_addr_genid;
 
 #ifdef CONFIG_IP_MROUTE
 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
@@ -207,6 +207,7 @@ extern int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb);
 
 struct in_ifaddr;
 extern void fib_add_ifaddr(struct in_ifaddr *);
+extern void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *);
 
 static inline void ip_rt_put(struct rtable * rt)
 {
@@ -269,8 +270,8 @@ static inline struct rtable *ip_route_newports(struct rtable *rt,
 		struct flowi4 fl4 = {
 			.flowi4_oif = rt->rt_oif,
 			.flowi4_mark = rt->rt_mark,
-			.daddr = rt->rt_key_dst,
-			.saddr = rt->rt_key_src,
+			.daddr = rt->rt_dst,
+			.saddr = rt->rt_src,
 			.flowi4_tos = rt->rt_tos,
 			.flowi4_proto = protocol,
 			.fl4_sport = sport,
@@ -25,6 +25,7 @@ struct qdisc_rate_table {
 enum qdisc_state_t {
 	__QDISC_STATE_SCHED,
 	__QDISC_STATE_DEACTIVATED,
+	__QDISC_STATE_THROTTLED,
 };
 
 /*
@@ -32,7 +33,6 @@ enum qdisc_state_t {
 */
 enum qdisc___state_t {
 	__QDISC___STATE_RUNNING = 1,
-	__QDISC___STATE_THROTTLED = 2,
 };
 
 struct qdisc_size_table {
@@ -106,17 +106,17 @@ static inline void qdisc_run_end(struct Qdisc *qdisc)
 
 static inline bool qdisc_is_throttled(const struct Qdisc *qdisc)
 {
-	return (qdisc->__state & __QDISC___STATE_THROTTLED) ? true : false;
+	return test_bit(__QDISC_STATE_THROTTLED, &qdisc->state) ? true : false;
 }
 
 static inline void qdisc_throttled(struct Qdisc *qdisc)
 {
-	qdisc->__state |= __QDISC___STATE_THROTTLED;
+	set_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
}
 
 static inline void qdisc_unthrottled(struct Qdisc *qdisc)
 {
-	qdisc->__state &= ~__QDISC___STATE_THROTTLED;
+	clear_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
 }
 
 struct Qdisc_class_ops {
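The qdisc hunks above fix the THROTTLED/RUNNING race by moving the throttled flag out of the plain __state word, which was updated with non-atomic |= and &= read-modify-write sequences, into an atomic set_bit/clear_bit/test_bit on qdisc->state. A userspace sketch of the safe form using C11 atomics (flag names and values hypothetical):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define STATE_RUNNING	(1u << 0)	/* another flag sharing the word */
	#define STATE_THROTTLED	(1u << 1)

	static atomic_uint state;

	/* A plain `word |= flag` compiles to load/or/store, so two CPUs
	 * updating different flags in the same word can lose one update.
	 * Each helper below is a single atomic read-modify-write instead.
	 */
	static void set_throttled(void)
	{
		atomic_fetch_or(&state, STATE_THROTTLED);
	}

	static void clear_throttled(void)
	{
		atomic_fetch_and(&state, ~STATE_THROTTLED);
	}

	static bool is_throttled(void)
	{
		return atomic_load(&state) & STATE_THROTTLED;
	}

	int main(void)
	{
		set_throttled();
		printf("throttled: %d\n", is_throttled());	/* 1 */
		clear_throttled();
		printf("throttled: %d\n", is_throttled());	/* 0 */
		return 0;
	}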
@@ -445,9 +445,9 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
 	ip6h->payload_len = htons(8 + sizeof(*mldq));
 	ip6h->nexthdr = IPPROTO_HOPOPTS;
 	ip6h->hop_limit = 1;
+	ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
 	ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
 			   &ip6h->saddr);
-	ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
 	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
 
 	hopopt = (u8 *)(ip6h + 1);
@@ -1353,14 +1353,17 @@ EXPORT_SYMBOL(dev_close);
 */
 void dev_disable_lro(struct net_device *dev)
 {
-	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
-	    dev->ethtool_ops->set_flags) {
-		u32 flags = dev->ethtool_ops->get_flags(dev);
-		if (flags & ETH_FLAG_LRO) {
-			flags &= ~ETH_FLAG_LRO;
-			dev->ethtool_ops->set_flags(dev, flags);
-		}
-	}
+	u32 flags;
+
+	if (dev->ethtool_ops && dev->ethtool_ops->get_flags)
+		flags = dev->ethtool_ops->get_flags(dev);
+	else
+		flags = ethtool_op_get_flags(dev);
+
+	if (!(flags & ETH_FLAG_LRO))
+		return;
+
+	__ethtool_set_flags(dev, flags & ~ETH_FLAG_LRO);
 	WARN_ON(dev->features & NETIF_F_LRO);
 }
 EXPORT_SYMBOL(dev_disable_lro);
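The dev_disable_lro() rewrite above flattens the nested conditionals into a straight line: read the flags through the driver hook when present, fall back to the generic helper otherwise, return early when LRO is already off, and funnel the actual change through one shared setter. A minimal sketch of that shape (all names hypothetical, not the kernel API):

	#include <stdio.h>

	#define FLAG_LRO (1u << 0)

	/* Hypothetical ops table mirroring an optional driver hook. */
	struct dev_ops {
		unsigned int (*get_flags)(void);	/* may be NULL */
	};

	static unsigned int generic_get_flags(void)
	{
		return FLAG_LRO;	/* stand-in for a generic helper */
	}

	static void set_flags(unsigned int flags)
	{
		printf("flags now 0x%x\n", flags);
	}

	static void disable_lro(const struct dev_ops *ops)
	{
		unsigned int flags;

		/* Prefer the driver hook, fall back to the generic helper. */
		if (ops && ops->get_flags)
			flags = ops->get_flags();
		else
			flags = generic_get_flags();

		if (!(flags & FLAG_LRO))
			return;		/* nothing to do */

		set_flags(flags & ~FLAG_LRO);
	}

	int main(void)
	{
		disable_lro(NULL);	/* uses the generic fallback */
		return 0;
	}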
@@ -513,7 +513,7 @@ static int ethtool_set_one_feature(struct net_device *dev,
 	}
 }
 
-static int __ethtool_set_flags(struct net_device *dev, u32 data)
+int __ethtool_set_flags(struct net_device *dev, u32 data)
 {
 	u32 changed;
 
@@ -64,6 +64,8 @@
 #include <net/rtnetlink.h>
 #include <net/net_namespace.h>
 
+#include "fib_lookup.h"
+
 static struct ipv4_devconf ipv4_devconf = {
 	.data = {
 		[IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
@@ -151,6 +153,20 @@ struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
 			break;
 		}
 	}
+	if (!result) {
+		struct flowi4 fl4 = { .daddr = addr };
+		struct fib_result res = { 0 };
+		struct fib_table *local;
+
+		/* Fallback to FIB local table so that communication
+		 * over loopback subnets work.
+		 */
+		local = fib_get_table(net, RT_TABLE_LOCAL);
+		if (local &&
+		    !fib_table_lookup(local, &fl4, &res, FIB_LOOKUP_NOREF) &&
+		    res.type == RTN_LOCAL)
+			result = FIB_RES_DEV(res);
+	}
 	if (result && devref)
 		dev_hold(result);
 	rcu_read_unlock();
@@ -345,6 +361,17 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
 			}
 		}
 
+		/* On promotion all secondaries from subnet are changing
+		 * the primary IP, we must remove all their routes silently
+		 * and later to add them back with new prefsrc. Do this
+		 * while all addresses are on the device list.
+		 */
+		for (ifa = promote; ifa; ifa = ifa->ifa_next) {
+			if (ifa1->ifa_mask == ifa->ifa_mask &&
+			    inet_ifa_match(ifa1->ifa_address, ifa))
+				fib_del_ifaddr(ifa, ifa1);
+		}
+
 		/* 2. Unlink it */
 
 		*ifap = ifa1->ifa_next;
@@ -364,6 +391,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
 	blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);
 
 	if (promote) {
+		struct in_ifaddr *next_sec = promote->ifa_next;
 
 		if (prev_prom) {
 			prev_prom->ifa_next = promote->ifa_next;
@@ -375,7 +403,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
 		rtmsg_ifa(RTM_NEWADDR, promote, nlh, pid);
 		blocking_notifier_call_chain(&inetaddr_chain,
 				NETDEV_UP, promote);
-		for (ifa = promote->ifa_next; ifa; ifa = ifa->ifa_next) {
+		for (ifa = next_sec; ifa; ifa = ifa->ifa_next) {
 			if (ifa1->ifa_mask != ifa->ifa_mask ||
 			    !inet_ifa_match(ifa1->ifa_address, ifa))
 				continue;
@@ -228,7 +228,7 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
 		if (res.type != RTN_LOCAL || !accept_local)
 			goto e_inval;
 	}
-	*spec_dst = FIB_RES_PREFSRC(res);
+	*spec_dst = FIB_RES_PREFSRC(net, res);
 	fib_combine_itag(itag, &res);
 	dev_match = false;
 
@@ -258,7 +258,7 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
 	ret = 0;
 	if (fib_lookup(net, &fl4, &res) == 0) {
 		if (res.type == RTN_UNICAST) {
-			*spec_dst = FIB_RES_PREFSRC(res);
+			*spec_dst = FIB_RES_PREFSRC(net, res);
 			ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
 		}
 	}
@@ -722,12 +722,17 @@ void fib_add_ifaddr(struct in_ifaddr *ifa)
 	}
 }
 
-static void fib_del_ifaddr(struct in_ifaddr *ifa)
+/* Delete primary or secondary address.
+ * Optionally, on secondary address promotion consider the addresses
+ * from subnet iprim as deleted, even if they are in device list.
+ * In this case the secondary ifa can be in device list.
+ */
+void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
 {
 	struct in_device *in_dev = ifa->ifa_dev;
 	struct net_device *dev = in_dev->dev;
 	struct in_ifaddr *ifa1;
-	struct in_ifaddr *prim = ifa;
+	struct in_ifaddr *prim = ifa, *prim1 = NULL;
 	__be32 brd = ifa->ifa_address | ~ifa->ifa_mask;
 	__be32 any = ifa->ifa_address & ifa->ifa_mask;
 #define LOCAL_OK	1
@@ -735,17 +740,26 @@ static void fib_del_ifaddr(struct in_ifaddr *ifa)
 #define BRD0_OK		4
 #define BRD1_OK		8
 	unsigned ok = 0;
+	int subnet = 0;		/* Primary network */
+	int gone = 1;		/* Address is missing */
+	int same_prefsrc = 0;	/* Another primary with same IP */
 
-	if (!(ifa->ifa_flags & IFA_F_SECONDARY))
-		fib_magic(RTM_DELROUTE,
-			  dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
-			  any, ifa->ifa_prefixlen, prim);
-	else {
+	if (ifa->ifa_flags & IFA_F_SECONDARY) {
 		prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
 		if (prim == NULL) {
 			printk(KERN_WARNING "fib_del_ifaddr: bug: prim == NULL\n");
 			return;
 		}
+		if (iprim && iprim != prim) {
+			printk(KERN_WARNING "fib_del_ifaddr: bug: iprim != prim\n");
+			return;
+		}
+	} else if (!ipv4_is_zeronet(any) &&
+		   (any != ifa->ifa_local || ifa->ifa_prefixlen < 32)) {
+		fib_magic(RTM_DELROUTE,
+			  dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
+			  any, ifa->ifa_prefixlen, prim);
+		subnet = 1;
 	}
 
 	/* Deletion is more complicated than add.
@@ -755,6 +769,49 @@ static void fib_del_ifaddr(struct in_ifaddr *ifa)
 	 */
 
 	for (ifa1 = in_dev->ifa_list; ifa1; ifa1 = ifa1->ifa_next) {
+		if (ifa1 == ifa) {
+			/* promotion, keep the IP */
+			gone = 0;
+			continue;
+		}
+		/* Ignore IFAs from our subnet */
+		if (iprim && ifa1->ifa_mask == iprim->ifa_mask &&
+		    inet_ifa_match(ifa1->ifa_address, iprim))
+			continue;
+
+		/* Ignore ifa1 if it uses different primary IP (prefsrc) */
+		if (ifa1->ifa_flags & IFA_F_SECONDARY) {
+			/* Another address from our subnet? */
+			if (ifa1->ifa_mask == prim->ifa_mask &&
+			    inet_ifa_match(ifa1->ifa_address, prim))
+				prim1 = prim;
+			else {
+				/* We reached the secondaries, so
+				 * same_prefsrc should be determined.
+				 */
+				if (!same_prefsrc)
+					continue;
+				/* Search new prim1 if ifa1 is not
+				 * using the current prim1
+				 */
+				if (!prim1 ||
+				    ifa1->ifa_mask != prim1->ifa_mask ||
+				    !inet_ifa_match(ifa1->ifa_address, prim1))
+					prim1 = inet_ifa_byprefix(in_dev,
							ifa1->ifa_address,
							ifa1->ifa_mask);
+				if (!prim1)
+					continue;
+				if (prim1->ifa_local != prim->ifa_local)
+					continue;
+			}
+		} else {
+			if (prim->ifa_local != ifa1->ifa_local)
+				continue;
+			prim1 = ifa1;
+			if (prim != prim1)
+				same_prefsrc = 1;
+		}
 		if (ifa->ifa_local == ifa1->ifa_local)
 			ok |= LOCAL_OK;
 		if (ifa->ifa_broadcast == ifa1->ifa_broadcast)
@@ -763,19 +820,37 @@ static void fib_del_ifaddr(struct in_ifaddr *ifa)
 			ok |= BRD1_OK;
 		if (any == ifa1->ifa_broadcast)
 			ok |= BRD0_OK;
+		/* primary has network specific broadcasts */
+		if (prim1 == ifa1 && ifa1->ifa_prefixlen < 31) {
+			__be32 brd1 = ifa1->ifa_address | ~ifa1->ifa_mask;
+			__be32 any1 = ifa1->ifa_address & ifa1->ifa_mask;
+
+			if (!ipv4_is_zeronet(any1)) {
+				if (ifa->ifa_broadcast == brd1 ||
+				    ifa->ifa_broadcast == any1)
+					ok |= BRD_OK;
+				if (brd == brd1 || brd == any1)
+					ok |= BRD1_OK;
+				if (any == brd1 || any == any1)
+					ok |= BRD0_OK;
+			}
+		}
 	}
 
 	if (!(ok & BRD_OK))
 		fib_magic(RTM_DELROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim);
-	if (!(ok & BRD1_OK))
-		fib_magic(RTM_DELROUTE, RTN_BROADCAST, brd, 32, prim);
-	if (!(ok & BRD0_OK))
-		fib_magic(RTM_DELROUTE, RTN_BROADCAST, any, 32, prim);
+	if (subnet && ifa->ifa_prefixlen < 31) {
+		if (!(ok & BRD1_OK))
+			fib_magic(RTM_DELROUTE, RTN_BROADCAST, brd, 32, prim);
+		if (!(ok & BRD0_OK))
+			fib_magic(RTM_DELROUTE, RTN_BROADCAST, any, 32, prim);
+	}
 	if (!(ok & LOCAL_OK)) {
 		fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 32, prim);
 
 		/* Check, that this local address finally disappeared. */
-		if (inet_addr_type(dev_net(dev), ifa->ifa_local) != RTN_LOCAL) {
+		if (gone &&
+		    inet_addr_type(dev_net(dev), ifa->ifa_local) != RTN_LOCAL) {
 			/* And the last, but not the least thing.
 			 * We must flush stray FIB entries.
 			 *
@@ -885,6 +960,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
 {
 	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
 	struct net_device *dev = ifa->ifa_dev->dev;
+	struct net *net = dev_net(dev);
 
 	switch (event) {
 	case NETDEV_UP:
@@ -892,12 +968,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 		fib_sync_up(dev);
 #endif
-		fib_update_nh_saddrs(dev);
+		atomic_inc(&net->ipv4.dev_addr_genid);
 		rt_cache_flush(dev_net(dev), -1);
 		break;
 	case NETDEV_DOWN:
-		fib_del_ifaddr(ifa);
-		fib_update_nh_saddrs(dev);
+		fib_del_ifaddr(ifa, NULL);
+		atomic_inc(&net->ipv4.dev_addr_genid);
 		if (ifa->ifa_dev->ifa_list == NULL) {
 			/* Last address was deleted from this interface.
 			 * Disable IP.
@@ -915,6 +991,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
 {
 	struct net_device *dev = ptr;
 	struct in_device *in_dev = __in_dev_get_rtnl(dev);
+	struct net *net = dev_net(dev);
 
 	if (event == NETDEV_UNREGISTER) {
 		fib_disable_ip(dev, 2, -1);
@@ -932,6 +1009,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 		fib_sync_up(dev);
 #endif
+		atomic_inc(&net->ipv4.dev_addr_genid);
 		rt_cache_flush(dev_net(dev), -1);
 		break;
 	case NETDEV_DOWN:
@@ -10,7 +10,6 @@ struct fib_alias {
 	struct fib_info *fa_info;
 	u8 fa_tos;
 	u8 fa_type;
-	u8 fa_scope;
 	u8 fa_state;
 	struct rcu_head rcu;
 };
@@ -29,7 +28,7 @@ extern void fib_release_info(struct fib_info *);
 extern struct fib_info *fib_create_info(struct fib_config *cfg);
 extern int fib_nh_match(struct fib_config *cfg, struct fib_info *fi);
 extern int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
-			 u32 tb_id, u8 type, u8 scope, __be32 dst,
+			 u32 tb_id, u8 type, __be32 dst,
 			 int dst_len, u8 tos, struct fib_info *fi,
 			 unsigned int);
 extern void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
@@ -222,7 +222,7 @@ static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
 	unsigned int mask = (fib_info_hash_size - 1);
 	unsigned int val = fi->fib_nhs;
 
-	val ^= fi->fib_protocol;
+	val ^= (fi->fib_protocol << 8) | fi->fib_scope;
 	val ^= (__force u32)fi->fib_prefsrc;
 	val ^= fi->fib_priority;
 	for_nexthops(fi) {
@@ -248,10 +248,11 @@ static struct fib_info *fib_find_info(const struct fib_info *nfi)
 		if (fi->fib_nhs != nfi->fib_nhs)
 			continue;
 		if (nfi->fib_protocol == fi->fib_protocol &&
+		    nfi->fib_scope == fi->fib_scope &&
 		    nfi->fib_prefsrc == fi->fib_prefsrc &&
 		    nfi->fib_priority == fi->fib_priority &&
 		    memcmp(nfi->fib_metrics, fi->fib_metrics,
-			   sizeof(fi->fib_metrics)) == 0 &&
+			   sizeof(u32) * RTAX_MAX) == 0 &&
 		    ((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_F_DEAD) == 0 &&
 		    (nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0))
 			return fi;
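The fib_find_info() hunk above also fixes a classic sizeof pitfall: fib_metrics is a u32 *, so sizeof(fi->fib_metrics) is the size of the pointer (4 or 8 bytes), not of the RTAX_MAX-entry array it points to; the fix spells the length out as sizeof(u32) * RTAX_MAX. A compact illustration (RTAX_MAX value here is only illustrative):

	#include <stdio.h>
	#include <stdint.h>

	#define RTAX_MAX 16	/* illustrative value */

	int main(void)
	{
		uint32_t *metrics = (uint32_t [RTAX_MAX]){ 0 };

		/* sizeof the pointer, NOT the array it points to: */
		printf("sizeof(metrics)           = %zu\n", sizeof(metrics));

		/* the intended comparison length: */
		printf("sizeof(uint32_t)*RTAX_MAX = %zu\n",
		       sizeof(uint32_t) * RTAX_MAX);
		return 0;
	}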
@@ -328,7 +329,7 @@ void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
 		goto errout;
 
 	err = fib_dump_info(skb, info->pid, seq, event, tb_id,
-			    fa->fa_type, fa->fa_scope, key, dst_len,
+			    fa->fa_type, key, dst_len,
 			    fa->fa_tos, fa->fa_info, nlm_flags);
 	if (err < 0) {
 		/* -EMSGSIZE implies BUG in fib_nlmsg_size() */
@@ -695,6 +696,16 @@ static void fib_info_hash_move(struct hlist_head *new_info_hash,
 	fib_info_hash_free(old_laddrhash, bytes);
 }
 
+__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
+{
+	nh->nh_saddr = inet_select_addr(nh->nh_dev,
+					nh->nh_gw,
+					nh->nh_parent->fib_scope);
+	nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
+
+	return nh->nh_saddr;
+}
+
 struct fib_info *fib_create_info(struct fib_config *cfg)
 {
 	int err;
@@ -753,6 +764,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
 
 	fi->fib_net = hold_net(net);
 	fi->fib_protocol = cfg->fc_protocol;
+	fi->fib_scope = cfg->fc_scope;
 	fi->fib_flags = cfg->fc_flags;
 	fi->fib_priority = cfg->fc_priority;
 	fi->fib_prefsrc = cfg->fc_prefsrc;
@@ -854,10 +866,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
 	}
 
 	change_nexthops(fi) {
-		nexthop_nh->nh_cfg_scope = cfg->fc_scope;
-		nexthop_nh->nh_saddr = inet_select_addr(nexthop_nh->nh_dev,
-					nexthop_nh->nh_gw,
-					nexthop_nh->nh_cfg_scope);
+		fib_info_update_nh_saddr(net, nexthop_nh);
 	} endfor_nexthops(fi)
 
 link_it:
@@ -906,7 +915,7 @@ failure:
 }
 
 int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
-		  u32 tb_id, u8 type, u8 scope, __be32 dst, int dst_len, u8 tos,
+		  u32 tb_id, u8 type, __be32 dst, int dst_len, u8 tos,
 		  struct fib_info *fi, unsigned int flags)
 {
 	struct nlmsghdr *nlh;
@@ -928,7 +937,7 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
 	NLA_PUT_U32(skb, RTA_TABLE, tb_id);
 	rtm->rtm_type = type;
 	rtm->rtm_flags = fi->fib_flags;
-	rtm->rtm_scope = scope;
+	rtm->rtm_scope = fi->fib_scope;
 	rtm->rtm_protocol = fi->fib_protocol;
 
 	if (rtm->rtm_dst_len)
@@ -1084,7 +1093,7 @@ void fib_select_default(struct fib_result *res)
 	list_for_each_entry_rcu(fa, fa_head, fa_list) {
 		struct fib_info *next_fi = fa->fa_info;
 
-		if (fa->fa_scope != res->scope ||
+		if (next_fi->fib_scope != res->scope ||
 		    fa->fa_type != RTN_UNICAST)
 			continue;
 
@@ -1128,24 +1137,6 @@ out:
 	return;
 }
 
-void fib_update_nh_saddrs(struct net_device *dev)
-{
-	struct hlist_head *head;
-	struct hlist_node *node;
-	struct fib_nh *nh;
-	unsigned int hash;
-
-	hash = fib_devindex_hashfn(dev->ifindex);
-	head = &fib_info_devhash[hash];
-	hlist_for_each_entry(nh, node, head, nh_hash) {
-		if (nh->nh_dev != dev)
-			continue;
-		nh->nh_saddr = inet_select_addr(nh->nh_dev,
-						nh->nh_gw,
-						nh->nh_cfg_scope);
-	}
-}
-
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 
 /*
@@ -1245,7 +1245,6 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
 			if (fa->fa_info->fib_priority != fi->fib_priority)
 				break;
 			if (fa->fa_type == cfg->fc_type &&
-			    fa->fa_scope == cfg->fc_scope &&
 			    fa->fa_info == fi) {
 				fa_match = fa;
 				break;
@@ -1271,7 +1270,6 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
 			new_fa->fa_tos = fa->fa_tos;
 			new_fa->fa_info = fi;
 			new_fa->fa_type = cfg->fc_type;
-			new_fa->fa_scope = cfg->fc_scope;
 			state = fa->fa_state;
 			new_fa->fa_state = state & ~FA_S_ACCESSED;
 
@@ -1308,7 +1306,6 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
 	new_fa->fa_info = fi;
 	new_fa->fa_tos = tos;
 	new_fa->fa_type = cfg->fc_type;
-	new_fa->fa_scope = cfg->fc_scope;
 	new_fa->fa_state = 0;
 	/*
 	 * Insert new entry to the list.
@@ -1362,7 +1359,7 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
 
 			if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
 				continue;
-			if (fa->fa_scope < flp->flowi4_scope)
+			if (fa->fa_info->fib_scope < flp->flowi4_scope)
 				continue;
 			fib_alias_accessed(fa);
 			err = fib_props[fa->fa_type].error;
@@ -1388,7 +1385,7 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
 			res->prefixlen = plen;
 			res->nh_sel = nhsel;
 			res->type = fa->fa_type;
-			res->scope = fa->fa_scope;
+			res->scope = fa->fa_info->fib_scope;
 			res->fi = fi;
 			res->table = tb;
 			res->fa_head = &li->falh;
@@ -1664,7 +1661,9 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
 
 		if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
 		    (cfg->fc_scope == RT_SCOPE_NOWHERE ||
-		     fa->fa_scope == cfg->fc_scope) &&
+		     fa->fa_info->fib_scope == cfg->fc_scope) &&
+		    (!cfg->fc_prefsrc ||
+		     fi->fib_prefsrc == cfg->fc_prefsrc) &&
 		    (!cfg->fc_protocol ||
 		     fi->fib_protocol == cfg->fc_protocol) &&
 		    fib_nh_match(cfg, fi) == 0) {
@@ -1861,7 +1860,6 @@ static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah,
 				  RTM_NEWROUTE,
 				  tb->tb_id,
 				  fa->fa_type,
-				  fa->fa_scope,
 				  xkey,
 				  plen,
 				  fa->fa_tos,
@@ -2382,7 +2380,7 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v)
 			seq_indent(seq, iter->depth+1);
 			seq_printf(seq, " /%d %s %s", li->plen,
 				   rtn_scope(buf1, sizeof(buf1),
-					     fa->fa_scope),
+					     fa->fa_info->fib_scope),
 				   rtn_type(buf2, sizeof(buf2),
 					    fa->fa_type));
 			if (fa->fa_tos)
@@ -1593,8 +1593,6 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
 			rt->rt_peer_genid = rt_peer_genid();
 		}
 		check_peer_pmtu(dst, peer);
-
-		inet_putpeer(peer);
 	}
 }
 
@@ -1720,7 +1718,7 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt)
 
 		rcu_read_lock();
 		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
-			src = FIB_RES_PREFSRC(res);
+			src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
 		else
 			src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
 					RT_SCOPE_UNIVERSE);
@@ -2617,7 +2615,7 @@ static struct rtable *ip_route_output_slow(struct net *net,
 		fib_select_default(&res);
 
 	if (!fl4.saddr)
-		fl4.saddr = FIB_RES_PREFSRC(res);
+		fl4.saddr = FIB_RES_PREFSRC(net, res);
 
 	dev_out = FIB_RES_DEV(res);
 	fl4.flowi4_oif = dev_out->ifindex;
@@ -3221,6 +3219,8 @@ static __net_init int rt_genid_init(struct net *net)
 {
 	get_random_bytes(&net->ipv4.rt_genid,
 			 sizeof(net->ipv4.rt_genid));
+	get_random_bytes(&net->ipv4.dev_addr_genid,
+			 sizeof(net->ipv4.dev_addr_genid));
 	return 0;
 }
 
@@ -2659,7 +2659,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
 #define DBGUNDO(x...) do { } while (0)
 #endif
 
-static void tcp_undo_cwr(struct sock *sk, const int undo)
+static void tcp_undo_cwr(struct sock *sk, const bool undo_ssthresh)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2671,14 +2671,13 @@ static void tcp_undo_cwr(struct sock *sk, const int undo)
 		else
 			tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1);
 
-		if (undo && tp->prior_ssthresh > tp->snd_ssthresh) {
+		if (undo_ssthresh && tp->prior_ssthresh > tp->snd_ssthresh) {
 			tp->snd_ssthresh = tp->prior_ssthresh;
 			TCP_ECN_withdraw_cwr(tp);
 		}
 	} else {
 		tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh);
 	}
-	tcp_moderate_cwnd(tp);
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
@@ -2699,7 +2698,7 @@ static int tcp_try_undo_recovery(struct sock *sk)
 		 * or our original transmission succeeded.
 		 */
 		DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
-		tcp_undo_cwr(sk, 1);
+		tcp_undo_cwr(sk, true);
 		if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
 			mib_idx = LINUX_MIB_TCPLOSSUNDO;
 		else
@@ -2726,7 +2725,7 @@ static void tcp_try_undo_dsack(struct sock *sk)
 
 	if (tp->undo_marker && !tp->undo_retrans) {
 		DBGUNDO(sk, "D-SACK");
-		tcp_undo_cwr(sk, 1);
+		tcp_undo_cwr(sk, true);
 		tp->undo_marker = 0;
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
 	}
@@ -2779,7 +2778,7 @@ static int tcp_try_undo_partial(struct sock *sk, int acked)
 		tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
 
 		DBGUNDO(sk, "Hoe");
-		tcp_undo_cwr(sk, 0);
+		tcp_undo_cwr(sk, false);
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
 
 		/* So... Do not make Hoe's retransmit yet.
@@ -2808,7 +2807,7 @@ static int tcp_try_undo_loss(struct sock *sk)
 
 		DBGUNDO(sk, "partial loss");
 		tp->lost_out = 0;
-		tcp_undo_cwr(sk, 1);
+		tcp_undo_cwr(sk, true);
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
 		inet_csk(sk)->icsk_retransmits = 0;
 		tp->undo_marker = 0;
@@ -2822,8 +2821,11 @@ static int tcp_try_undo_loss(struct sock *sk)
 static inline void tcp_complete_cwr(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
-	tp->snd_cwnd_stamp = tcp_time_stamp;
+	/* Do not moderate cwnd if it's already undone in cwr or recovery */
+	if (tp->undo_marker && tp->snd_cwnd > tp->snd_ssthresh) {
+		tp->snd_cwnd = tp->snd_ssthresh;
+		tp->snd_cwnd_stamp = tcp_time_stamp;
+	}
 	tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
 }
 
@@ -3494,7 +3496,7 @@ static void tcp_undo_spur_to_response(struct sock *sk, int flag)
 	if (flag & FLAG_ECE)
 		tcp_ratehalving_spur_to_response(sk);
 	else
-		tcp_undo_cwr(sk, 1);
+		tcp_undo_cwr(sk, true);
 }
 
 /* F-RTO spurious RTO detection algorithm (RFC4138)
@@ -854,7 +854,7 @@ static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table
 	return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
 }
 
-struct dst_entry * ip6_route_output(struct net *net, struct sock *sk,
+struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk,
 				    struct flowi6 *fl6)
 {
 	int flags = 0;
@@ -243,6 +243,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
 	memcpy(sta->sta.addr, addr, ETH_ALEN);
 	sta->local = local;
 	sta->sdata = sdata;
+	sta->last_rx = jiffies;
 
 	ewma_init(&sta->avg_signal, 1024, 8);
 