commit d247b6ab3c

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/Makefile
	net/ipv6/sysctl_net_ipv6.c

Two ipv6_table_template[] additions overlap, so the index of the
ipv6_table[x] assignments needed to be adjusted.

In the drivers/net/Makefile case, we've gotten rid of the garbage
whereby we had to list every single USB networking driver in the
top-level Makefile; there is now just one "USB_NETWORKING" guard
covering everything.

Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -125,7 +125,7 @@ int bond_sysfs_slave_add(struct slave *slave)
 	for (a = slave_attrs; *a; ++a) {
 		err = sysfs_create_file(&slave->kobj, &((*a)->attr));
 		if (err) {
-			kobject_del(&slave->kobj);
+			kobject_put(&slave->kobj);
 			return err;
 		}
 	}
@@ -140,5 +140,5 @@ void bond_sysfs_slave_del(struct slave *slave)
 	for (a = slave_attrs; *a; ++a)
 		sysfs_remove_file(&slave->kobj, &((*a)->attr));
 
-	kobject_del(&slave->kobj);
+	kobject_put(&slave->kobj);
 }
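Note on the two bonding hunks above: kobject_del() only removes the sysfs
entry, while kobject_put() also drops the reference taken when the kobject
was set up, letting its release function run. A minimal userspace sketch of
that refcounting rule (the struct and helpers here are illustrative, not
kernel API):

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcount;
};

static struct obj *obj_get(struct obj *o) { o->refcount++; return o; }

static void obj_put(struct obj *o)
{
	/* only the final put releases the object */
	if (--o->refcount == 0) {
		printf("released\n");
		free(o);
	}
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	obj_get(o);	/* reference taken at registration time */
	/* error path: an unlink-style call that skips the put would
	 * leave refcount at 1 and leak 'o' forever */
	obj_put(o);
	return 0;
}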
@@ -633,8 +633,10 @@ static void emac_rx(struct net_device *dev)
 			}
 
 			/* Move data from EMAC */
-			skb = dev_alloc_skb(rxlen + 4);
-			if (good_packet && skb) {
+			if (good_packet) {
+				skb = netdev_alloc_skb(dev, rxlen + 4);
+				if (!skb)
+					continue;
 				skb_reserve(skb, 2);
 				rdptr = (u8 *) skb_put(skb, rxlen - 4);
 
@@ -7830,17 +7830,18 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
 
 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
 
-/* Use GSO to workaround a rare TSO bug that may be triggered when the
- * TSO header is greater than 80 bytes.
+/* Use GSO to workaround all TSO packets that meet HW bug conditions
+ * indicated in tg3_tx_frag_set()
  */
-static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
+static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
+		       struct netdev_queue *txq, struct sk_buff *skb)
 {
 	struct sk_buff *segs, *nskb;
 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
 
 	/* Estimate the number of fragments in the worst case */
-	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
-		netif_stop_queue(tp->dev);
+	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
+		netif_tx_stop_queue(txq);
 
 		/* netif_tx_stop_queue() must be done before
 		 * checking tx index in tg3_tx_avail() below, because in
@@ -7848,13 +7849,14 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
 		 * netif_tx_queue_stopped().
 		 */
 		smp_mb();
-		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
+		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
 			return NETDEV_TX_BUSY;
 
-		netif_wake_queue(tp->dev);
+		netif_tx_wake_queue(txq);
 	}
 
-	segs = skb_gso_segment(skb, tp->dev->features & ~(NETIF_F_TSO | NETIF_F_TSO6));
+	segs = skb_gso_segment(skb, tp->dev->features &
+			       ~(NETIF_F_TSO | NETIF_F_TSO6));
 	if (IS_ERR(segs) || !segs)
 		goto tg3_tso_bug_end;
 
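The tg3 hunks convert the TSO workaround to per-queue flow control, and the
comment retained above states the key constraint: the queue must be stopped
before re-checking availability, with a full barrier in between, so a
concurrent completion cannot slip through the window. A compilable C11
atomics sketch of that stop/re-check pattern (names and types are stand-ins,
not driver code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct txq {
	atomic_bool stopped;
	atomic_int avail;	/* free descriptors, refilled by completions */
};

static bool try_reserve(struct txq *q, int needed)
{
	if (atomic_load(&q->avail) > needed)
		return true;

	atomic_store(&q->stopped, true);		/* netif_tx_stop_queue() */
	atomic_thread_fence(memory_order_seq_cst);	/* stands in for smp_mb() */

	/* re-check after the stop is visible; a completion that ran in
	 * between will now see 'stopped' and wake the queue */
	if (atomic_load(&q->avail) <= needed)
		return false;				/* NETDEV_TX_BUSY */

	atomic_store(&q->stopped, false);		/* netif_tx_wake_queue() */
	return true;
}

int main(void)
{
	struct txq q;

	atomic_init(&q.stopped, false);
	atomic_init(&q.avail, 8);
	printf("reserved: %d\n", try_reserve(&q, 16));	/* 0: queue stopped */
	printf("reserved: %d\n", try_reserve(&q, 4));	/* 1: room available */
	return 0;
}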
@@ -7930,7 +7932,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (!skb_is_gso_v6(skb)) {
 			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
 			    tg3_flag(tp, TSO_BUG))
-				return tg3_tso_bug(tp, skb);
+				return tg3_tso_bug(tp, tnapi, txq, skb);
 
 			ip_csum = iph->check;
 			ip_tot_len = iph->tot_len;
@@ -8061,7 +8063,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 				iph->tot_len = ip_tot_len;
 			}
 			tcph->check = tcp_csum;
-			return tg3_tso_bug(tp, skb);
+			return tg3_tso_bug(tp, tnapi, txq, skb);
 		}
 
 		/* If the workaround fails due to memory/mapping
@@ -600,9 +600,9 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 	prefetch(bnad->netdev);
 
 	cq = ccb->sw_q;
-	cmpl = &cq[ccb->producer_index];
 
 	while (packets < budget) {
+		cmpl = &cq[ccb->producer_index];
 		if (!cmpl->valid)
 			break;
 		/* The 'valid' field is set by the adapter, only after writing
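The bnad change above moves the completion-descriptor lookup inside the
polling loop, since ccb->producer_index advances as entries are consumed. A
toy model of why the pointer must be recomputed on every pass (the array,
values, and names are made up for illustration):

#include <stdio.h>

int main(void)
{
	int cq[4] = { 10, 20, 30, 40 };
	unsigned int producer_index = 0;
	int packets = 0;

	while (packets < 3) {
		/* recomputed each pass; hoisting this out of the loop
		 * would keep processing the same stale entry */
		int *cmpl = &cq[producer_index];

		printf("processing %d\n", *cmpl);
		producer_index = (producer_index + 1) % 4;
		packets++;
	}
	return 0;
}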
@@ -997,10 +997,8 @@ bnad_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
 	unsigned long flags = 0;
 	int ret = 0;
 
-	/* Check if the flash read request is valid */
-	if (eeprom->magic != (bnad->pcidev->vendor |
-			     (bnad->pcidev->device << 16)))
-		return -EFAULT;
+	/* Fill the magic value */
+	eeprom->magic = bnad->pcidev->vendor | (bnad->pcidev->device << 16);
 
 	/* Query the flash partition based on the offset */
 	flash_part = bnad_get_flash_partition_by_offset(bnad,
@@ -136,7 +136,7 @@ int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
 	rsp = qlcnic_poll_rsp(adapter);
 
 	if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
-		dev_err(&pdev->dev, "card response timeout.\n");
+		dev_err(&pdev->dev, "command timeout, response = 0x%x\n", rsp);
 		cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
 	} else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
 		cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1), &err);
@@ -1290,17 +1290,25 @@ static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type)
 
 void qlcnic_update_stats(struct qlcnic_adapter *adapter)
 {
+	struct qlcnic_tx_queue_stats tx_stats;
 	struct qlcnic_host_tx_ring *tx_ring;
 	int ring;
 
+	memset(&tx_stats, 0, sizeof(tx_stats));
 	for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
 		tx_ring = &adapter->tx_ring[ring];
-		adapter->stats.xmit_on += tx_ring->tx_stats.xmit_on;
-		adapter->stats.xmit_off += tx_ring->tx_stats.xmit_off;
-		adapter->stats.xmitcalled += tx_ring->tx_stats.xmit_called;
-		adapter->stats.xmitfinished += tx_ring->tx_stats.xmit_finished;
-		adapter->stats.txbytes += tx_ring->tx_stats.tx_bytes;
+		tx_stats.xmit_on += tx_ring->tx_stats.xmit_on;
+		tx_stats.xmit_off += tx_ring->tx_stats.xmit_off;
+		tx_stats.xmit_called += tx_ring->tx_stats.xmit_called;
+		tx_stats.xmit_finished += tx_ring->tx_stats.xmit_finished;
+		tx_stats.tx_bytes += tx_ring->tx_stats.tx_bytes;
 	}
 
+	adapter->stats.xmit_on = tx_stats.xmit_on;
+	adapter->stats.xmit_off = tx_stats.xmit_off;
+	adapter->stats.xmitcalled = tx_stats.xmit_called;
+	adapter->stats.xmitfinished = tx_stats.xmit_finished;
+	adapter->stats.txbytes = tx_stats.tx_bytes;
 }
 
 static u64 *qlcnic_fill_tx_queue_stats(u64 *data, void *stats)
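The qlcnic rework above exists because adding the per-ring counters into
adapter->stats with "+=" inflates the totals every time the function runs;
accumulating into a zeroed local and then assigning makes the update
idempotent. A small standalone sketch of the same pattern (struct, values,
and names are illustrative only):

#include <stdio.h>
#include <string.h>

struct stats { unsigned long xmit_on; };

static struct stats adapter_stats;
static unsigned long ring_xmit_on[2] = { 5, 7 };

static void update_stats(void)
{
	struct stats tx_stats;
	int ring;

	memset(&tx_stats, 0, sizeof(tx_stats));
	for (ring = 0; ring < 2; ring++)
		tx_stats.xmit_on += ring_xmit_on[ring];

	adapter_stats.xmit_on = tx_stats.xmit_on;	/* assign, don't += */
}

int main(void)
{
	update_stats();
	update_stats();		/* a second call must not inflate the total */
	printf("xmit_on = %lu\n", adapter_stats.xmit_on);	/* 12, not 24 */
	return 0;
}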
@@ -2324,14 +2324,14 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
 	if (err)
 		return err;
 
+	qlcnic_dcb_init_dcbnl_ops(adapter->dcb);
+
 	err = register_netdev(netdev);
 	if (err) {
 		dev_err(&pdev->dev, "failed to register net device\n");
 		return err;
 	}
 
-	qlcnic_dcb_init_dcbnl_ops(adapter->dcb);
-
 	return 0;
 }
 
@@ -2624,13 +2624,13 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (err)
 		goto err_out_disable_mbx_intr;
 
+	if (adapter->portnum == 0)
+		qlcnic_set_drv_version(adapter);
+
 	err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
 	if (err)
 		goto err_out_disable_mbx_intr;
 
-	if (adapter->portnum == 0)
-		qlcnic_set_drv_version(adapter);
-
 	pci_set_drvdata(pdev, adapter);
 
 	if (qlcnic_82xx_check(adapter))
@@ -646,6 +646,7 @@ static int macvlan_init(struct net_device *dev)
 				  (lowerdev->state & MACVLAN_STATE_MASK);
 	dev->features		= lowerdev->features & MACVLAN_FEATURES;
 	dev->features		|= ALWAYS_ON_FEATURES;
+	dev->vlan_features	= lowerdev->vlan_features & MACVLAN_FEATURES;
 	dev->gso_max_size	= lowerdev->gso_max_size;
 	dev->iflink		= lowerdev->ifindex;
 	dev->hard_header_len	= lowerdev->hard_header_len;
@@ -255,7 +255,6 @@ int mdiobus_register(struct mii_bus *bus)
 
 	bus->dev.parent = bus->parent;
 	bus->dev.class = &mdio_bus_class;
-	bus->dev.driver = bus->parent->driver;
 	bus->dev.groups = NULL;
 	dev_set_name(&bus->dev, "%s", bus->id);
 
@@ -85,14 +85,28 @@ static int always_connected (struct usbnet *dev)
  *
  *-------------------------------------------------------------------------*/
 
+static void m5632_recover(struct usbnet *dev)
+{
+	struct usb_device *udev = dev->udev;
+	struct usb_interface *intf = dev->intf;
+	int r;
+
+	r = usb_lock_device_for_reset(udev, intf);
+	if (r < 0)
+		return;
+
+	usb_reset_device(udev);
+	usb_unlock_device(udev);
+}
+
 static const struct driver_info	ali_m5632_info = {
 	.description =	"ALi M5632",
 	.flags       =	FLAG_POINTTOPOINT,
+	.recover     =	m5632_recover,
 };
 
 #endif
 
 
 #ifdef	CONFIG_USB_AN2720
 #define	HAVE_HARDWARE
 
@@ -326,12 +340,23 @@ static const struct usb_device_id products [] = {
 MODULE_DEVICE_TABLE(usb, products);
 
 /*-------------------------------------------------------------------------*/
+static int dummy_prereset(struct usb_interface *intf)
+{
+	return 0;
+}
+
+static int dummy_postreset(struct usb_interface *intf)
+{
+	return 0;
+}
+
 static struct usb_driver cdc_subset_driver = {
 	.name =		"cdc_subset",
 	.probe =	usbnet_probe,
 	.suspend =	usbnet_suspend,
 	.resume =	usbnet_resume,
+	.pre_reset =	dummy_prereset,
+	.post_reset =	dummy_postreset,
 	.disconnect =	usbnet_disconnect,
 	.id_table =	products,
 	.disable_hub_initiated_lpm = 1,
@@ -1218,8 +1218,12 @@ void usbnet_tx_timeout (struct net_device *net)
 
 	unlink_urbs (dev, &dev->txq);
 	tasklet_schedule (&dev->bh);
-	// FIXME: device recovery -- reset?
+	/* this needs to be handled individually because the generic layer
+	 * doesn't know what is sufficient and could not restore private
+	 * information if a remedy of an unconditional reset were used.
+	 */
+	if (dev->driver_info->recover)
+		(dev->driver_info->recover)(dev);
 }
 EXPORT_SYMBOL_GPL(usbnet_tx_timeout);
 
@@ -1196,22 +1196,6 @@ static void xennet_release_rx_bufs(struct netfront_queue *queue)
 	spin_unlock_bh(&queue->rx_lock);
 }
 
-static void xennet_uninit(struct net_device *dev)
-{
-	struct netfront_info *np = netdev_priv(dev);
-	unsigned int num_queues = dev->real_num_tx_queues;
-	struct netfront_queue *queue;
-	unsigned int i;
-
-	for (i = 0; i < num_queues; ++i) {
-		queue = &np->queues[i];
-		xennet_release_tx_bufs(queue);
-		xennet_release_rx_bufs(queue);
-		gnttab_free_grant_references(queue->gref_tx_head);
-		gnttab_free_grant_references(queue->gref_rx_head);
-	}
-}
-
 static netdev_features_t xennet_fix_features(struct net_device *dev,
 	netdev_features_t features)
 {
@@ -1313,7 +1297,6 @@ static void xennet_poll_controller(struct net_device *dev)
 
 static const struct net_device_ops xennet_netdev_ops = {
 	.ndo_open            = xennet_open,
-	.ndo_uninit          = xennet_uninit,
 	.ndo_stop            = xennet_close,
 	.ndo_start_xmit      = xennet_start_xmit,
 	.ndo_change_mtu	     = xennet_change_mtu,
@@ -1455,6 +1438,11 @@ static void xennet_disconnect_backend(struct netfront_info *info)
 
 		napi_synchronize(&queue->napi);
 
+		xennet_release_tx_bufs(queue);
+		xennet_release_rx_bufs(queue);
+		gnttab_free_grant_references(queue->gref_tx_head);
+		gnttab_free_grant_references(queue->gref_rx_head);
+
 		/* End access and free the pages */
 		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
 		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
@@ -1827,8 +1815,8 @@ static int xennet_create_queues(struct netfront_info *info,
 
 		ret = xennet_init_queue(queue);
 		if (ret < 0) {
-			dev_warn(&info->netdev->dev, "only created %d queues\n",
-				 num_queues);
+			dev_warn(&info->netdev->dev,
+				 "only created %d queues\n", i);
 			num_queues = i;
 			break;
 		}
@@ -2001,7 +1989,7 @@ abort_transaction_no_dev_fatal:
 	info->queues = NULL;
 	rtnl_lock();
 	netif_set_real_num_tx_queues(info->netdev, 0);
-	rtnl_lock();
+	rtnl_unlock();
 out:
 	return err;
 }
@@ -2010,10 +1998,7 @@ static int xennet_connect(struct net_device *dev)
 {
 	struct netfront_info *np = netdev_priv(dev);
 	unsigned int num_queues = 0;
-	int i, requeue_idx, err;
-	struct sk_buff *skb;
-	grant_ref_t ref;
-	struct xen_netif_rx_request *req;
+	int err;
 	unsigned int feature_rx_copy;
 	unsigned int j = 0;
 	struct netfront_queue *queue = NULL;
@@ -2040,47 +2025,8 @@ static int xennet_connect(struct net_device *dev)
 	netdev_update_features(dev);
 	rtnl_unlock();
 
-	/* By now, the queue structures have been set up */
-	for (j = 0; j < num_queues; ++j) {
-		queue = &np->queues[j];
-
-		/* Step 1: Discard all pending TX packet fragments. */
-		spin_lock_irq(&queue->tx_lock);
-		xennet_release_tx_bufs(queue);
-		spin_unlock_irq(&queue->tx_lock);
-
-		/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
-		spin_lock_bh(&queue->rx_lock);
-
-		for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
-			skb_frag_t *frag;
-			const struct page *page;
-			if (!queue->rx_skbs[i])
-				continue;
-
-			skb = queue->rx_skbs[requeue_idx] = xennet_get_rx_skb(queue, i);
-			ref = queue->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(queue, i);
-			req = RING_GET_REQUEST(&queue->rx, requeue_idx);
-
-			frag = &skb_shinfo(skb)->frags[0];
-			page = skb_frag_page(frag);
-			gnttab_grant_foreign_access_ref(
-				ref, queue->info->xbdev->otherend_id,
-				pfn_to_mfn(page_to_pfn(page)),
-				0);
-			req->gref = ref;
-			req->id = requeue_idx;
-
-			requeue_idx++;
-		}
-
-		queue->rx.req_prod_pvt = requeue_idx;
-
-		spin_unlock_bh(&queue->rx_lock);
-	}
-
 	/*
-	 * Step 3: All public and private state should now be sane.  Get
+	 * All public and private state should now be sane.  Get
 	 * ready to start sending and receiving packets and give the driver
 	 * domain a kick because we've probably just requeued some
 	 * packets.
@@ -148,6 +148,9 @@ struct driver_info {
 	struct sk_buff	*(*tx_fixup)(struct usbnet *dev,
 				struct sk_buff *skb, gfp_t flags);
 
+	/* recover from timeout */
+	void	(*recover)(struct usbnet *dev);
+
 	/* early initialization code, can sleep. This is for minidrivers
 	 * having 'subminidrivers' that need to do extra initialization
 	 * right after minidriver have initialized hardware. */
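Taken together, the usbnet hunks add an optional ->recover hook to struct
driver_info and invoke it from the timeout path, so only minidrivers that
know a safe remedy (such as the ALi M5632 reset above) opt in. A userspace
sketch of that optional-callback pattern (simplified stand-in types, not the
usbnet API):

#include <stdio.h>

struct device;	/* opaque; only passed through */

struct driver_info_sketch {
	const char *description;
	void (*recover)(struct device *dev);	/* optional, may be NULL */
};

static void m5632_style_recover(struct device *dev)
{
	(void)dev;
	printf("device-specific recovery\n");
}

static void tx_timeout(const struct driver_info_sketch *info,
		       struct device *dev)
{
	/* the generic layer only calls the hook when one is provided */
	if (info->recover)
		info->recover(dev);
}

int main(void)
{
	struct driver_info_sketch info = {
		.description = "example",
		.recover = m5632_style_recover,
	};

	tx_timeout(&info, NULL);
	return 0;
}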
@@ -40,6 +40,7 @@ struct ip_tunnel_prl_entry {
 
 struct ip_tunnel_dst {
 	struct dst_entry __rcu	*dst;
+	__be32			 saddr;
 };
 
 struct ip_tunnel {
@@ -85,6 +85,10 @@ EXPORT_SYMBOL(memcpy_toiovecend);
 int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
 			int offset, int len)
 {
+	/* No data? Done! */
+	if (len == 0)
+		return 0;
+
 	/* Skip over the finished iovecs */
 	while (offset >= iov->iov_len) {
 		offset -= iov->iov_len;
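The len == 0 guard above matters because a caller may legitimately pass an
offset equal to the total iovec length with nothing to copy; without the
early return, the skip loop walks past the last iovec before the copy loop
ever checks len. A userspace re-creation of the function under those assumed
simplified semantics (no kernel context):

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

static int copy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
			     size_t offset, size_t len)
{
	/* No data? Done! */
	if (len == 0)
		return 0;

	/* Skip over the finished iovecs */
	while (offset >= iov->iov_len) {
		offset -= iov->iov_len;
		iov++;		/* would run past the end without the guard */
	}

	while (len > 0) {
		size_t copy = iov->iov_len - offset;

		if (copy > len)
			copy = len;
		memcpy(kdata, (unsigned char *)iov->iov_base + offset, copy);
		offset = 0;
		kdata += copy;
		len -= copy;
		iov++;
	}
	return 0;
}

int main(void)
{
	char src[8] = "abcdefgh", dst[8];
	struct iovec iov = { .iov_base = src, .iov_len = 8 };

	/* offset == total length: safe only because of the len == 0 guard */
	copy_fromiovecend((unsigned char *)dst, &iov, 8, 0);
	copy_fromiovecend((unsigned char *)dst, &iov, 4, 4);
	printf("%.4s\n", dst);	/* efgh */
	return 0;
}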
@@ -128,6 +128,7 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
 {
 	struct batadv_frag_table_entry *chain;
 	struct batadv_frag_list_entry *frag_entry_new = NULL, *frag_entry_curr;
+	struct batadv_frag_list_entry *frag_entry_last = NULL;
 	struct batadv_frag_packet *frag_packet;
 	uint8_t bucket;
 	uint16_t seqno, hdr_size = sizeof(struct batadv_frag_packet);
@@ -180,11 +181,14 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
 			ret = true;
 			goto out;
 		}
 
+		/* store current entry because it could be the last in list */
+		frag_entry_last = frag_entry_curr;
 	}
 
-	/* Reached the end of the list, so insert after 'frag_entry_curr'. */
-	if (likely(frag_entry_curr)) {
-		hlist_add_after(&frag_entry_curr->list, &frag_entry_new->list);
+	/* Reached the end of the list, so insert after 'frag_entry_last'. */
+	if (likely(frag_entry_last)) {
+		hlist_add_after(&frag_entry_last->list, &frag_entry_new->list);
 		chain->size += skb->len - hdr_size;
 		chain->timestamp = jiffies;
 		ret = true;
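The batman-adv fix works because frag_entry_curr is the loop cursor, and a
cursor is no longer meaningful once the walk completes; a separate
frag_entry_last pointer has to be kept up to date inside the loop. The same
pitfall in a minimal singly linked list (a plain C stand-in for the kernel's
hlist):

#include <stdio.h>
#include <stddef.h>

struct node {
	int seqno;
	struct node *next;
};

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node new_entry = { 4, NULL };
	struct node *curr, *last = NULL;

	for (curr = &a; curr; curr = curr->next)
		last = curr;	/* store current entry: it could be the last */

	/* curr is NULL here; only 'last' still points at the tail */
	if (last) {
		new_entry.next = last->next;
		last->next = &new_entry;
	}

	for (curr = &a; curr; curr = curr->next)
		printf("%d ", curr->seqno);	/* 1 2 3 4 */
	printf("\n");
	return 0;
}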
@@ -629,7 +629,7 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
 	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
 		goto nla_put_failure;
 
-	if (nla_put(skb, NDA_VLAN, sizeof(u16), &fdb->vlan_id))
+	if (fdb->vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16), &fdb->vlan_id))
 		goto nla_put_failure;
 
 	return nlmsg_end(skb, nlh);
@@ -2976,9 +2976,9 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
 		tail = nskb;
 
 		__copy_skb_header(nskb, head_skb);
-		nskb->mac_len = head_skb->mac_len;
 
 		skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
+		skb_reset_mac_len(nskb);
 
 		skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
 						 nskb->data - tnl_hlen,
@@ -69,23 +69,25 @@ static unsigned int ip_tunnel_hash(__be32 key, __be32 remote)
 }
 
 static void __tunnel_dst_set(struct ip_tunnel_dst *idst,
-			     struct dst_entry *dst)
+			     struct dst_entry *dst, __be32 saddr)
 {
 	struct dst_entry *old_dst;
 
 	dst_clone(dst);
 	old_dst = xchg((__force struct dst_entry **)&idst->dst, dst);
 	dst_release(old_dst);
+	idst->saddr = saddr;
 }
 
-static void tunnel_dst_set(struct ip_tunnel *t, struct dst_entry *dst)
+static void tunnel_dst_set(struct ip_tunnel *t,
+			   struct dst_entry *dst, __be32 saddr)
 {
-	__tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst);
+	__tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst, saddr);
 }
 
 static void tunnel_dst_reset(struct ip_tunnel *t)
 {
-	tunnel_dst_set(t, NULL);
+	tunnel_dst_set(t, NULL, 0);
 }
 
 void ip_tunnel_dst_reset_all(struct ip_tunnel *t)
@@ -93,20 +95,25 @@ void ip_tunnel_dst_reset_all(struct ip_tunnel *t)
 	int i;
 
 	for_each_possible_cpu(i)
-		__tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
+		__tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL, 0);
 }
 EXPORT_SYMBOL(ip_tunnel_dst_reset_all);
 
-static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
+static struct rtable *tunnel_rtable_get(struct ip_tunnel *t,
+					u32 cookie, __be32 *saddr)
 {
+	struct ip_tunnel_dst *idst;
 	struct dst_entry *dst;
 
 	rcu_read_lock();
-	dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
+	idst = this_cpu_ptr(t->dst_cache);
+	dst = rcu_dereference(idst->dst);
 	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
 		dst = NULL;
 	if (dst) {
-		if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+		if (!dst->obsolete || dst->ops->check(dst, cookie)) {
+			*saddr = idst->saddr;
+		} else {
 			tunnel_dst_reset(t);
 			dst_release(dst);
 			dst = NULL;
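The ip_tunnel hunks cache the selected source address next to the cached dst
and hand both back (or reset both) on lookup, so a cached route can never be
paired with a stale saddr. A simplified single-threaded model of that
cache-as-a-pair idea (names and the validity test are illustrative; the
kernel version uses RCU and xchg()):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct tunnel_dst_sketch {
	const char *dst;	/* stands in for struct dst_entry */
	uint32_t saddr;
	bool valid;
};

static void tunnel_dst_set(struct tunnel_dst_sketch *c,
			   const char *dst, uint32_t saddr)
{
	c->dst = dst;
	c->saddr = saddr;	/* always stored together with the route */
	c->valid = (dst != NULL);
}

static const char *tunnel_dst_get(struct tunnel_dst_sketch *c,
				  uint32_t *saddr)
{
	if (!c->valid)
		return NULL;
	*saddr = c->saddr;	/* the route is only reusable with its saddr */
	return c->dst;
}

int main(void)
{
	struct tunnel_dst_sketch cache = { 0 };
	uint32_t saddr = 0;

	tunnel_dst_set(&cache, "route-via-eth0", 0x0a000001);
	if (tunnel_dst_get(&cache, &saddr))
		printf("cached saddr: 0x%08x\n", saddr);
	return 0;
}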
@@ -367,7 +374,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
 
 		if (!IS_ERR(rt)) {
 			tdev = rt->dst.dev;
-			tunnel_dst_set(tunnel, &rt->dst);
+			tunnel_dst_set(tunnel, &rt->dst, fl4.saddr);
 			ip_rt_put(rt);
 		}
 		if (dev->type != ARPHRD_ETHER)
@@ -610,7 +617,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 	init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
 			 tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link);
 
-	rt = connected ? tunnel_rtable_get(tunnel, 0) : NULL;
+	rt = connected ? tunnel_rtable_get(tunnel, 0, &fl4.saddr) : NULL;
 
 	if (!rt) {
 		rt = ip_route_output_key(tunnel->net, &fl4);
@@ -620,7 +627,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 			goto tx_error;
 		}
 		if (connected)
-			tunnel_dst_set(tunnel, &rt->dst);
+			tunnel_dst_set(tunnel, &rt->dst, fl4.saddr);
 	}
 
 	if (rt->dst.dev == dev) {
@@ -218,7 +218,8 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 		 * This is:
 		 *     (actual rate in segments) * baseRTT
 		 */
-		target_cwnd = tp->snd_cwnd * vegas->baseRTT / rtt;
+		target_cwnd = (u64)tp->snd_cwnd * vegas->baseRTT;
+		do_div(target_cwnd, rtt);
 
 		/* Calculate the difference between the window we had,
 		 * and the window we would like to have. This quantity
@@ -144,7 +144,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 
 		rtt = veno->minrtt;
 
-		target_cwnd = (tp->snd_cwnd * veno->basertt);
+		target_cwnd = (u64)tp->snd_cwnd * veno->basertt;
 		target_cwnd <<= V_PARAM_SHIFT;
 		do_div(target_cwnd, rtt);
 
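Both congestion-avoidance fixes above address the same arithmetic bug:
snd_cwnd * baseRTT is evaluated in 32 bits and can wrap before the division,
so the product must be widened to u64 first (do_div() in the kernel). A
small program showing the wrap and the widened computation (the values are
made up but within realistic ranges: cwnd in segments, RTTs in
microseconds):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t snd_cwnd = 4000;
	uint32_t base_rtt = 1200000;	/* 1.2 s in usec */
	uint32_t rtt = 1300000;

	uint32_t bad = snd_cwnd * base_rtt / rtt;	/* product wraps at 2^32 */
	uint64_t good = (uint64_t)snd_cwnd * base_rtt / rtt;

	printf("32-bit: %u segments\n", bad);		/* wildly too small */
	printf("64-bit: %llu segments\n", (unsigned long long)good);
	return 0;
}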
@@ -82,6 +82,7 @@ static int __net_init ipv6_sysctl_net_init(struct net *net)
 	ipv6_table[1].data = &net->ipv6.sysctl.anycast_src_echo_reply;
 	ipv6_table[2].data = &net->ipv6.sysctl.flowlabel_consistency;
 	ipv6_table[3].data = &net->ipv6.sysctl.auto_flowlabels;
+	ipv6_table[4].data = &net->ipv6.sysctl.fwmark_reflect;
 
 	ipv6_route_table = ipv6_route_sysctl_init(net);
 	if (!ipv6_route_table)
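This hunk is the net/ipv6/sysctl_net_ipv6.c side of the conflict described
in the merge message: both branches appended to ipv6_table_template[], and
the per-net .data fixups address the template by position, so the new
entry's index had to be adjusted. A toy illustration of that positional
coupling (the struct is a stand-in, not the kernel's ctl_table):

#include <stdio.h>

struct ctl_entry_sketch { const char *procname; int *data; };

static struct ctl_entry_sketch ipv6_tbl_sketch[] = {
	{ "bindv6only", NULL },
	{ "anycast_src_echo_reply", NULL },
	{ "flowlabel_consistency", NULL },
	{ "auto_flowlabels", NULL },
	{ "fwmark_reflect", NULL },	/* entry added by the other branch */
};

int main(void)
{
	static int bindv6only, anycast, flowlabel, autolabel, fwmark;

	/* every new template entry shifts the indices below */
	ipv6_tbl_sketch[0].data = &bindv6only;
	ipv6_tbl_sketch[1].data = &anycast;
	ipv6_tbl_sketch[2].data = &flowlabel;
	ipv6_tbl_sketch[3].data = &autolabel;
	ipv6_tbl_sketch[4].data = &fwmark;	/* index adjusted in the merge */

	printf("%zu sysctl entries wired up\n",
	       sizeof(ipv6_tbl_sketch) / sizeof(ipv6_tbl_sketch[0]));
	return 0;
}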
@@ -967,8 +967,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 	iph->nexthdr		=	IPPROTO_IPV6;
 	iph->payload_len	=	old_iph->payload_len;
 	be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
-	iph->priority		=	old_iph->priority;
 	memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
+	ipv6_change_dsfield(iph, 0, ipv6_get_dsfield(old_iph));
 	iph->daddr = cp->daddr.in6;
 	iph->saddr = saddr;
 	iph->hop_limit		=	old_iph->hop_limit;
@@ -3144,6 +3144,7 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
 	if (set->flags & NFT_SET_MAP)
 		nft_data_uninit(&elem.data, set->dtype);
 
+	return 0;
 err2:
 	nft_data_uninit(&elem.key, desc.type);
 err1:
@@ -50,11 +50,14 @@ struct xt_led_info_internal {
 	struct timer_list timer;
 };
 
+#define XT_LED_BLINK_DELAY 50 /* ms */
+
 static unsigned int
 led_tg(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	const struct xt_led_info *ledinfo = par->targinfo;
 	struct xt_led_info_internal *ledinternal = ledinfo->internal_data;
+	unsigned long led_delay = XT_LED_BLINK_DELAY;
 
 	/*
 	 * If "always blink" is enabled, and there's still some time until the
@@ -62,9 +65,10 @@ led_tg(struct sk_buff *skb, const struct xt_action_param *par)
 	 */
 	if ((ledinfo->delay > 0) && ledinfo->always_blink &&
 	    timer_pending(&ledinternal->timer))
-		led_trigger_event(&ledinternal->netfilter_led_trigger, LED_OFF);
-
-	led_trigger_event(&ledinternal->netfilter_led_trigger, LED_FULL);
+		led_trigger_blink_oneshot(&ledinternal->netfilter_led_trigger,
+					  &led_delay, &led_delay, 1);
+	else
+		led_trigger_event(&ledinternal->netfilter_led_trigger, LED_FULL);
 
 	/* If there's a positive delay, start/update the timer */
 	if (ledinfo->delay > 0) {
@@ -599,7 +599,7 @@ out:
 	return err;
 no_route:
 	kfree_skb(nskb);
-	IP_INC_STATS_BH(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
+	IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
 
 	/* FIXME: Returning the 'err' will effect all the associations
 	 * associated with a socket, although only one of the paths of the