Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Several routines do not use netdev_features_t to hold such bitmasks,
    fixes from Patrick McHardy and Bjørn Mork.

 2) Update cpsw IRQ software state and the actual HW irq enabling in the
    correct order.  From Mugunthan V N.

 3) When sending tipc packets to multiple bearers, we have to make copies
    of the SKB rather than just giving the original SKB directly.  Fix
    from Gerlando Falauto.

 4) Fix race with bridging topology change timer, from Stephen Hemminger.

 5) Fix TCPv6 segmentation handling in GRE and VXLAN, from Pravin B Shelar.

 6) Endian bug in USB pegasus driver, from Dan Carpenter.

 7) Fix crashes on MTU reduction in USB asix driver, from Holger
    Eitzenberger.

 8) Don't allow the kernel to BUG() just because the user puts some crap
    in an AF_PACKET mmap() ring descriptor.  Fix from Daniel Borkmann.

 9) Don't use variable sized arrays on the stack in xen-netback, from
    Wei Liu.

10) Fix stats reporting and an unbalanced napi_disable() in be2net
    driver.  From Somnath Kotur and Ajit Khaparde.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (25 commits)
  cxgb4: fix error recovery when t4_fw_hello returns a positive value
  sky2: Fix crash on receiving VLAN frames
  packet: tpacket_v3: do not trigger bug() on wrong header status
  asix: fix BUG in receive path when lowering MTU
  net: qmi_wwan: Add Telewell TW-LTE 4G
  usbnet: pegasus: endian bug in write_mii_word()
  vxlan: Fix TCPv6 segmentation.
  gre: Fix GREv4 TCPv6 segmentation.
  bridge: fix race with topology change timer
  tipc: pskb_copy() buffers when sending on more than one bearer
  tipc: tipc_bcbearer_send(): simplify bearer selection
  tipc: cosmetic: clean up comments and break a long line
  drivers: net: cpsw: irq not disabled in cpsw isr in particular sequence
  xen-netback: better names for thresholds
  xen-netback: avoid allocating variable size array on stack
  xen-netback: remove redundent parameter in netbk_count_requests
  be2net: Fix to fail probe if MSI-X enable fails for a VF
  be2net: avoid napi_disable() when it has not been enabled
  be2net: Fix firmware download for Lancer
  be2net: Fix to receive Multicast Packets when Promiscuous mode is enabled on certain devices
  ...
commit 1aaf6d3d3d
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -5204,7 +5204,7 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
 
     if (t4_wait_dev_ready(adap) < 0)
         return PCI_ERS_RESULT_DISCONNECT;
-    if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL))
+    if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
         return PCI_ERS_RESULT_DISCONNECT;
     adap->flags |= FW_OK;
     if (adap_init1(adap, &c))
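
The fix above is a sign-convention repair: per the commit title, t4_fw_hello() can return a positive value on success (only negative returns are errnos), so treating any non-zero return as failure rejected successful recoveries. A minimal standalone sketch of the pitfall, with a hypothetical fw_hello() stand-in:

    #include <stdio.h>

    /* Hypothetical stand-in for t4_fw_hello(): negative errno on failure,
     * non-negative value (e.g. the master PF number) on success. */
    static int fw_hello(void) { return 2; }

    int main(void)
    {
        if (fw_hello())         /* bug: success (2) treated as an error */
            puts("disconnect (wrong)");
        if (fw_hello() < 0)     /* fix: only negative returns are errors */
            puts("disconnect");
        return 0;
    }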
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -327,6 +327,7 @@ enum vf_state {
 
 #define BE_FLAGS_LINK_STATUS_INIT       1
 #define BE_FLAGS_WORKER_SCHEDULED       (1 << 3)
+#define BE_FLAGS_NAPI_ENABLED           (1 << 9)
 #define BE_UC_PMAC_COUNT                30
 #define BE_VF_UC_PMAC_COUNT             2
 #define BE_FLAGS_QNQ_ASYNC_EVT_RCVD     (1 << 11)
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -961,19 +961,8 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
             OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);
 
     req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
-    if (lancer_chip(adapter)) {
-        req->hdr.version = 2;
-        req->page_size = 1; /* 1 for 4K */
-        AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
-                      no_delay);
-        AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
-                      __ilog2_u32(cq->len/256));
-        AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
-        AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
-                      ctxt, 1);
-        AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
-                      ctxt, eq->id);
-    } else {
+    if (BEx_chip(adapter)) {
         AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
                       coalesce_wm);
         AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
@@ -983,6 +972,18 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
         AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
         AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
         AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
+    } else {
+        req->hdr.version = 2;
+        req->page_size = 1; /* 1 for 4K */
+        AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
+                      no_delay);
+        AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
+                      __ilog2_u32(cq->len/256));
+        AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
+        AMAP_SET_BITS(struct amap_cq_context_v2, eventable,
+                      ctxt, 1);
+        AMAP_SET_BITS(struct amap_cq_context_v2, eqid,
+                      ctxt, eq->id);
     }
 
     be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -1763,10 +1764,12 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
     req->if_id = cpu_to_le32(adapter->if_handle);
     if (flags & IFF_PROMISC) {
         req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
-                    BE_IF_FLAGS_VLAN_PROMISCUOUS);
+                    BE_IF_FLAGS_VLAN_PROMISCUOUS |
+                    BE_IF_FLAGS_MCAST_PROMISCUOUS);
         if (value == ON)
             req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
-                    BE_IF_FLAGS_VLAN_PROMISCUOUS);
+                    BE_IF_FLAGS_VLAN_PROMISCUOUS |
+                    BE_IF_FLAGS_MCAST_PROMISCUOUS);
     } else if (flags & IFF_ALLMULTI) {
         req->if_flags_mask = req->if_flags =
                 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
@@ -2084,7 +2087,7 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
     spin_unlock_bh(&adapter->mcc_lock);
 
     if (!wait_for_completion_timeout(&adapter->flash_compl,
-                     msecs_to_jiffies(30000)))
+                     msecs_to_jiffies(60000)))
         status = -1;
     else
         status = adapter->flash_status;
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -381,7 +381,7 @@ struct amap_cq_context_be {
     u8 rsvd5[32];       /* dword 3*/
 } __packed;
 
-struct amap_cq_context_lancer {
+struct amap_cq_context_v2 {
     u8 rsvd0[12];       /* dword 0*/
     u8 coalescwm[2];    /* dword 0*/
     u8 nodelay;         /* dword 0*/
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -85,6 +85,7 @@ static const struct be_ethtool_stat et_stats[] = {
     {DRVSTAT_INFO(tx_pauseframes)},
     {DRVSTAT_INFO(tx_controlframes)},
     {DRVSTAT_INFO(rx_priority_pause_frames)},
+    {DRVSTAT_INFO(tx_priority_pauseframes)},
     /* Received packets dropped when an internal fifo going into
      * main packet buffer tank (PMEM) overflows.
      */
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -410,6 +410,7 @@ static void populate_be_v1_stats(struct be_adapter *adapter)
     drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
     drvs->tx_pauseframes = port_stats->tx_pauseframes;
     drvs->tx_controlframes = port_stats->tx_controlframes;
+    drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
     drvs->jabber_events = port_stats->jabber_events;
     drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
     drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
@@ -471,11 +472,26 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
     ACCESS_ONCE(*acc) = newacc;
 }
 
+void populate_erx_stats(struct be_adapter *adapter,
+            struct be_rx_obj *rxo,
+            u32 erx_stat)
+{
+    if (!BEx_chip(adapter))
+        rx_stats(rxo)->rx_drops_no_frags = erx_stat;
+    else
+        /* below erx HW counter can actually wrap around after
+         * 65535. Driver accumulates a 32-bit value
+         */
+        accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
+                     (u16)erx_stat);
+}
+
 void be_parse_stats(struct be_adapter *adapter)
 {
     struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
     struct be_rx_obj *rxo;
     int i;
+    u32 erx_stat;
 
     if (lancer_chip(adapter)) {
         populate_lancer_stats(adapter);
@@ -488,12 +504,8 @@ void be_parse_stats(struct be_adapter *adapter)
 
         /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
         for_all_rx_queues(adapter, rxo, i) {
-            /* below erx HW counter can actually wrap around after
-             * 65535. Driver accumulates a 32-bit value
-             */
-            accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
-                         (u16)erx->rx_drops_no_fragments \
-                         [rxo->q.id]);
+            erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
+            populate_erx_stats(adapter, rxo, erx_stat);
         }
     }
 }
@@ -2378,7 +2390,7 @@ static uint be_num_rss_want(struct be_adapter *adapter)
     return num;
 }
 
-static void be_msix_enable(struct be_adapter *adapter)
+static int be_msix_enable(struct be_adapter *adapter)
 {
 #define BE_MIN_MSIX_VECTORS     1
     int i, status, num_vec, num_roce_vec = 0;
@@ -2403,13 +2415,17 @@ static void be_msix_enable(struct be_adapter *adapter)
         goto done;
     } else if (status >= BE_MIN_MSIX_VECTORS) {
         num_vec = status;
-        if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
-                    num_vec) == 0)
+        status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
+                     num_vec);
+        if (!status)
             goto done;
     }
 
     dev_warn(dev, "MSIx enable failed\n");
-    return;
+    /* INTx is not supported in VFs, so fail probe if enable_msix fails */
+    if (!be_physfn(adapter))
+        return status;
+    return 0;
 done:
     if (be_roce_supported(adapter)) {
         if (num_vec > num_roce_vec) {
@@ -2423,7 +2439,7 @@ done:
     } else
         adapter->num_msix_vec = num_vec;
     dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
-    return;
+    return 0;
 }
 
 static inline int be_msix_vec_get(struct be_adapter *adapter,
@@ -2536,8 +2552,11 @@ static int be_close(struct net_device *netdev)
 
     be_roce_dev_close(adapter);
 
-    for_all_evt_queues(adapter, eqo, i)
-        napi_disable(&eqo->napi);
+    if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
+        for_all_evt_queues(adapter, eqo, i)
+            napi_disable(&eqo->napi);
+        adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
+    }
 
     be_async_mcc_disable(adapter);
 
@@ -2631,7 +2650,9 @@ static int be_open(struct net_device *netdev)
     if (status)
         goto err;
 
-    be_irq_register(adapter);
+    status = be_irq_register(adapter);
+    if (status)
+        goto err;
 
     for_all_rx_queues(adapter, rxo, i)
         be_cq_notify(adapter, rxo->cq.id, true, 0);
@@ -2645,6 +2666,7 @@ static int be_open(struct net_device *netdev)
         napi_enable(&eqo->napi);
         be_eq_notify(adapter, eqo->q.id, true, false, 0);
     }
+    adapter->flags |= BE_FLAGS_NAPI_ENABLED;
 
     status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
     if (!status)
@@ -3100,7 +3122,9 @@ static int be_setup(struct be_adapter *adapter)
     if (status)
         goto err;
 
-    be_msix_enable(adapter);
+    status = be_msix_enable(adapter);
+    if (status)
+        goto err;
 
     status = be_evt_queues_create(adapter);
     if (status)
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2496,10 +2496,12 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
         skb->ip_summed = re->skb->ip_summed;
         skb->csum = re->skb->csum;
         skb->rxhash = re->skb->rxhash;
+        skb->vlan_proto = re->skb->vlan_proto;
         skb->vlan_tci = re->skb->vlan_tci;
 
         pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
                            length, PCI_DMA_FROMDEVICE);
+        re->skb->vlan_proto = 0;
         re->skb->vlan_tci = 0;
         re->skb->rxhash = 0;
         re->skb->ip_summed = CHECKSUM_NONE;
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -555,8 +555,8 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
         cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
         prim_cpsw = cpsw_get_slave_priv(priv, 0);
         if (prim_cpsw->irq_enabled == false) {
-            cpsw_enable_irq(priv);
             prim_cpsw->irq_enabled = true;
+            cpsw_enable_irq(priv);
         }
     }
 
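
The swap above matters because the RX interrupt can fire the moment it is re-armed: if the handler runs before irq_enabled is updated, it sees stale software state. Publishing the flag first closes that window. A hedged, standalone sketch of the pattern (names hypothetical, not cpsw code):

    #include <stdatomic.h>

    static atomic_bool irq_enabled;

    static void hw_enable_irq(void) { /* stand-in for cpsw_enable_irq() */ }

    static void reenable_rx_irq(void)
    {
        atomic_store(&irq_enabled, true); /* publish software state first... */
        hw_enable_irq();                  /* ...then let the IRQ fire; the
                                           * handler now sees irq_enabled == true */
    }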
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -100,6 +100,9 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
             netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
                    rx->size);
             kfree_skb(rx->ax_skb);
+            rx->ax_skb = NULL;
+            rx->size = 0U;
+
             return 0;
         }
 
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -256,8 +256,9 @@ static int mdio_read(struct net_device *dev, int phy_id, int loc)
 static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
 {
     pegasus_t *pegasus = netdev_priv(dev);
+    u16 data = val;
 
-    write_mii_word(pegasus, phy_id, loc, (__u16 *)&val);
+    write_mii_word(pegasus, phy_id, loc, &data);
 }
 
 static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
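
The pegasus change fixes an endianness bug: val is an int, and casting &val to __u16 * reads the first two bytes of that int, which on a big-endian CPU are the high-order bytes rather than the 16-bit value. A standalone illustration (not driver code):

    #include <stdio.h>
    #include <stdint.h>

    static void write_reg16(const uint16_t *regval)
    {
        printf("0x%04x\n", *regval);
    }

    int main(void)
    {
        int val = 0x1234;
        write_reg16((uint16_t *)&val); /* old code's cast: 0x0000 on big-endian */

        uint16_t data = (uint16_t)val; /* the fix: a genuine 16-bit object */
        write_reg16(&data);            /* 0x1234 on any endianness */
        return 0;
    }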
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -548,6 +548,7 @@ static const struct usb_device_id products[] = {
     {QMI_FIXED_INTF(0x19d2, 0x0265, 4)},    /* ONDA MT8205 4G LTE */
     {QMI_FIXED_INTF(0x19d2, 0x0284, 4)},    /* ZTE MF880 */
     {QMI_FIXED_INTF(0x19d2, 0x0326, 4)},    /* ZTE MF821D */
+    {QMI_FIXED_INTF(0x19d2, 0x0412, 4)},    /* Telewell TW-LTE 4G */
     {QMI_FIXED_INTF(0x19d2, 0x1008, 4)},    /* ZTE (Vodafone) K3570-Z */
     {QMI_FIXED_INTF(0x19d2, 0x1010, 4)},    /* ZTE (Vodafone) K3571-Z */
     {QMI_FIXED_INTF(0x19d2, 0x1012, 4)},
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -51,9 +51,17 @@
  * This is the maximum slots a skb can have. If a guest sends a skb
  * which exceeds this limit it is considered malicious.
  */
-#define MAX_SKB_SLOTS_DEFAULT 20
-static unsigned int max_skb_slots = MAX_SKB_SLOTS_DEFAULT;
-module_param(max_skb_slots, uint, 0444);
+#define FATAL_SKB_SLOTS_DEFAULT 20
+static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
+module_param(fatal_skb_slots, uint, 0444);
+
+/*
+ * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
+ * the maximum slots a valid packet can use. Now this value is defined
+ * to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by
+ * all backend.
+ */
+#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
 
 typedef unsigned int pending_ring_idx_t;
 #define INVALID_PENDING_RING_IDX (~0U)
@@ -928,18 +936,20 @@ static void netbk_fatal_tx_err(struct xenvif *vif)
 
 static int netbk_count_requests(struct xenvif *vif,
                 struct xen_netif_tx_request *first,
-                RING_IDX first_idx,
                 struct xen_netif_tx_request *txp,
                 int work_to_do)
 {
     RING_IDX cons = vif->tx.req_cons;
     int slots = 0;
     int drop_err = 0;
+    int more_data;
 
     if (!(first->flags & XEN_NETTXF_more_data))
         return 0;
 
     do {
+        struct xen_netif_tx_request dropped_tx = { 0 };
+
         if (slots >= work_to_do) {
             netdev_err(vif->dev,
                    "Asked for %d slots but exceeds this limit\n",
@@ -951,28 +961,32 @@ static int netbk_count_requests(struct xenvif *vif,
         /* This guest is really using too many slots and
          * considered malicious.
          */
-        if (unlikely(slots >= max_skb_slots)) {
+        if (unlikely(slots >= fatal_skb_slots)) {
             netdev_err(vif->dev,
                    "Malicious frontend using %d slots, threshold %u\n",
-                   slots, max_skb_slots);
+                   slots, fatal_skb_slots);
             netbk_fatal_tx_err(vif);
             return -E2BIG;
         }
 
         /* Xen network protocol had implicit dependency on
-         * MAX_SKB_FRAGS. XEN_NETIF_NR_SLOTS_MIN is set to the
-         * historical MAX_SKB_FRAGS value 18 to honor the same
-         * behavior as before. Any packet using more than 18
-         * slots but less than max_skb_slots slots is dropped
+         * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
+         * the historical MAX_SKB_FRAGS value 18 to honor the
+         * same behavior as before. Any packet using more than
+         * 18 slots but less than fatal_skb_slots slots is
+         * dropped
          */
-        if (!drop_err && slots >= XEN_NETIF_NR_SLOTS_MIN) {
+        if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
             if (net_ratelimit())
                 netdev_dbg(vif->dev,
                        "Too many slots (%d) exceeding limit (%d), dropping packet\n",
-                       slots, XEN_NETIF_NR_SLOTS_MIN);
+                       slots, XEN_NETBK_LEGACY_SLOTS_MAX);
             drop_err = -E2BIG;
         }
 
+        if (drop_err)
+            txp = &dropped_tx;
+
         memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
                sizeof(*txp));
 
@@ -1002,10 +1016,16 @@ static int netbk_count_requests(struct xenvif *vif,
             netbk_fatal_tx_err(vif);
             return -EINVAL;
         }
-    } while ((txp++)->flags & XEN_NETTXF_more_data);
+
+        more_data = txp->flags & XEN_NETTXF_more_data;
+
+        if (!drop_err)
+            txp++;
+
+    } while (more_data);
 
     if (drop_err) {
-        netbk_tx_err(vif, first, first_idx + slots);
+        netbk_tx_err(vif, first, cons + slots);
         return drop_err;
     }
 
@@ -1042,7 +1062,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
     struct pending_tx_info *first = NULL;
 
     /* At this point shinfo->nr_frags is in fact the number of
-     * slots, which can be as large as XEN_NETIF_NR_SLOTS_MIN.
+     * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
      */
     nr_slots = shinfo->nr_frags;
 
@@ -1404,12 +1424,12 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
     struct sk_buff *skb;
     int ret;
 
-    while ((nr_pending_reqs(netbk) + XEN_NETIF_NR_SLOTS_MIN
+    while ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX
         < MAX_PENDING_REQS) &&
            !list_empty(&netbk->net_schedule_list)) {
         struct xenvif *vif;
         struct xen_netif_tx_request txreq;
-        struct xen_netif_tx_request txfrags[max_skb_slots];
+        struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
         struct page *page;
         struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
         u16 pending_idx;
@@ -1470,8 +1490,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
             continue;
         }
 
-        ret = netbk_count_requests(vif, &txreq, idx,
-                       txfrags, work_to_do);
+        ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
         if (unlikely(ret < 0))
             continue;
 
@@ -1498,7 +1517,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
         pending_idx = netbk->pending_ring[index];
 
         data_len = (txreq.size > PKT_PROT_LEN &&
-                ret < XEN_NETIF_NR_SLOTS_MIN) ?
+                ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
             PKT_PROT_LEN : txreq.size;
 
         skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
@@ -1777,7 +1796,7 @@ static inline int rx_work_todo(struct xen_netbk *netbk)
 static inline int tx_work_todo(struct xen_netbk *netbk)
 {
 
-    if ((nr_pending_reqs(netbk) + XEN_NETIF_NR_SLOTS_MIN
+    if ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX
          < MAX_PENDING_REQS) &&
          !list_empty(&netbk->net_schedule_list))
         return 1;
@@ -1862,11 +1881,11 @@ static int __init netback_init(void)
     if (!xen_domain())
         return -ENODEV;
 
-    if (max_skb_slots < XEN_NETIF_NR_SLOTS_MIN) {
+    if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
         printk(KERN_INFO
-               "xen-netback: max_skb_slots too small (%d), bump it to XEN_NETIF_NR_SLOTS_MIN (%d)\n",
-               max_skb_slots, XEN_NETIF_NR_SLOTS_MIN);
-        max_skb_slots = XEN_NETIF_NR_SLOTS_MIN;
+               "xen-netback: fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
+               fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
+        fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
     }
 
     xen_netbk_group_nr = num_online_cpus();
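
Two of the xen-netback changes interlock: txfrags[] used to be a variable-length array sized by the max_skb_slots module parameter, so the stack frame of xen_netbk_tx_build_gops() grew with a runtime tunable. Bounding it by the compile-time XEN_NETBK_LEGACY_SLOTS_MAX removes that. A minimal sketch of the difference, with an illustrative constant rather than the kernel's:

    #define LEGACY_SLOTS_MAX 18              /* compile-time bound */

    static unsigned int tunable_slots = 20;  /* like a module parameter */

    void risky(void)
    {
        int frags[tunable_slots];    /* VLA: stack cost follows the tunable */
        (void)frags;
    }

    void bounded(void)
    {
        int frags[LEGACY_SLOTS_MAX]; /* fixed stack cost, known at build time */
        (void)frags;
    }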
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -628,7 +628,7 @@ static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
     netdev_features_t features)
 {
     struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
-    u32 old_features = features;
+    netdev_features_t old_features = features;
 
     features &= real_dev->vlan_features;
     features |= NETIF_F_RXCSUM;
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -107,7 +107,7 @@ static void br_tcn_timer_expired(unsigned long arg)
 
     br_debug(br, "tcn timer expired\n");
     spin_lock(&br->lock);
-    if (br->dev->flags & IFF_UP) {
+    if (!br_is_root_bridge(br) && (br->dev->flags & IFF_UP)) {
         br_transmit_tcn(br);
 
         mod_timer(&br->tcn_timer,jiffies + br->bridge_hello_time);
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2456,7 +2456,7 @@ EXPORT_SYMBOL(netif_skb_features);
  * 2. skb is fragmented and the device does not support SG.
  */
 static inline int skb_needs_linearize(struct sk_buff *skb,
-                      int features)
+                      netdev_features_t features)
 {
     return skb_is_nonlinear(skb) &&
         ((skb_has_frag_list(skb) &&
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1421,7 +1421,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
     void __user *useraddr = ifr->ifr_data;
     u32 ethcmd;
     int rc;
-    u32 old_features;
+    netdev_features_t old_features;
 
     if (!dev || !netif_device_present(dev))
         return -ENODEV;
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1293,6 +1293,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
                SKB_GSO_DODGY |
                SKB_GSO_TCP_ECN |
                SKB_GSO_GRE |
+               SKB_GSO_TCPV6 |
                SKB_GSO_UDP_TUNNEL |
                0)))
         goto out;
--- a/net/ipv4/gre.c
+++ b/net/ipv4/gre.c
@@ -121,6 +121,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
     int ghl = GRE_HEADER_SECTION;
     struct gre_base_hdr *greh;
     int mac_len = skb->mac_len;
+    __be16 protocol = skb->protocol;
     int tnl_hlen;
     bool csum;
 
@@ -150,7 +151,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
 
     /* setup inner skb. */
     if (greh->protocol == htons(ETH_P_TEB)) {
-        struct ethhdr *eth = eth_hdr(skb);
+        struct ethhdr *eth = (struct ethhdr *)skb_inner_mac_header(skb);
         skb->protocol = eth->h_proto;
     } else {
         skb->protocol = greh->protocol;
@@ -199,6 +200,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
         skb_reset_mac_header(skb);
         skb_set_network_header(skb, mac_len);
         skb->mac_len = mac_len;
+        skb->protocol = protocol;
     } while ((skb = skb->next));
 out:
     return segs;
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2311,8 +2311,10 @@ static struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
     struct sk_buff *segs = ERR_PTR(-EINVAL);
     int mac_len = skb->mac_len;
     int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
-    int outer_hlen;
+    struct ethhdr *inner_eth = (struct ethhdr *)skb_inner_mac_header(skb);
+    __be16 protocol = skb->protocol;
     netdev_features_t enc_features;
+    int outer_hlen;
 
     if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
         goto out;
@@ -2322,6 +2324,8 @@ static struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
     skb_reset_mac_header(skb);
     skb_set_network_header(skb, skb_inner_network_offset(skb));
     skb->mac_len = skb_inner_network_offset(skb);
+    inner_eth = (struct ethhdr *)skb_mac_header(skb);
+    skb->protocol = inner_eth->h_proto;
 
     /* segment inner packet. */
     enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
@@ -2358,6 +2362,7 @@ static struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
 
         }
         skb->ip_summed = CHECKSUM_NONE;
+        skb->protocol = protocol;
     } while ((skb = skb->next));
 out:
     return segs;
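
The GRE and UDP-tunnel fixes share one idiom: capture the outer skb->protocol before segmenting, point skb->protocol at the inner header so the inner (here TCPv6) segmentation code runs, then restore the outer value on every resulting segment. A hedged, standalone model of that shape (the types and segment_inner() are hypothetical, not the kernel API):

    #include <stdint.h>
    #include <stddef.h>

    struct seg { uint16_t protocol; struct seg *next; };

    /* Hypothetical inner-segmentation step: the segments it returns carry
     * the *inner* protocol in their protocol field. */
    struct seg *segment_inner(struct seg *pkt);

    struct seg *tunnel_segment(struct seg *pkt)
    {
        uint16_t outer = pkt->protocol;        /* 1. capture outer protocol */
        struct seg *segs = segment_inner(pkt); /* 2. segment inner packet */

        for (struct seg *s = segs; s != NULL; s = s->next)
            s->protocol = outer;               /* 3. restore it per segment */
        return segs;
    }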
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -742,36 +742,33 @@ static void prb_open_block(struct tpacket_kbdq_core *pkc1,
 
     smp_rmb();
 
-    if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd1))) {
-
-        /* We could have just memset this but we will lose the
-         * flexibility of making the priv area sticky
-         */
-        BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
-        BLOCK_NUM_PKTS(pbd1) = 0;
-        BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
-        getnstimeofday(&ts);
-        h1->ts_first_pkt.ts_sec = ts.tv_sec;
-        h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
-        pkc1->pkblk_start = (char *)pbd1;
-        pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
-        BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
-        BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
-        pbd1->version = pkc1->version;
-        pkc1->prev = pkc1->nxt_offset;
-        pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
-        prb_thaw_queue(pkc1);
-        _prb_refresh_rx_retire_blk_timer(pkc1);
-
-        smp_wmb();
-
-        return;
-    }
-
-    WARN(1, "ERROR block:%p is NOT FREE status:%d kactive_blk_num:%d\n",
-         pbd1, BLOCK_STATUS(pbd1), pkc1->kactive_blk_num);
-    dump_stack();
-    BUG();
+    /* We could have just memset this but we will lose the
+     * flexibility of making the priv area sticky
+     */
+
+    BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
+    BLOCK_NUM_PKTS(pbd1) = 0;
+    BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
+
+    getnstimeofday(&ts);
+
+    h1->ts_first_pkt.ts_sec = ts.tv_sec;
+    h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
+
+    pkc1->pkblk_start = (char *)pbd1;
+    pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
+
+    BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
+    BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
+
+    pbd1->version = pkc1->version;
+    pkc1->prev = pkc1->nxt_offset;
+    pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
+
+    prb_thaw_queue(pkc1);
+    _prb_refresh_rx_retire_blk_timer(pkc1);
+
+    smp_wmb();
 }
 
 /*
@@ -862,10 +859,6 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
         prb_close_block(pkc, pbd, po, status);
         return;
     }
-
-    WARN(1, "ERROR-pbd[%d]:%p\n", pkc->kactive_blk_num, pbd);
-    dump_stack();
-    BUG();
 }
 
 static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
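
Both removals follow the same rule: the TPACKET_V3 block header lives in a ring that user space maps, so its status word is user-controllable input, and BUG()-ing on an unexpected value lets a buggy or hostile process crash the kernel. prb_open_block() now simply (re)initializes the block, and prb_retire_current_block() just returns. A plain-C illustration of the principle, not kernel code:

    #include <stdio.h>

    /* 'status' stands for a value user space can write at any time. */
    static int handle_block(int status)
    {
        /* anti-pattern: assert()/BUG() here would turn bad user input
         * into a crash */
        if (status != 0) {
            fprintf(stderr, "bad ring descriptor status %d\n", status);
            return -1; /* fail the operation, not the whole system */
        }
        return 0;
    }

    int main(void) { return handle_block(1) ? 1 : 0; }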
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -584,8 +584,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
 {
     int bp_index;
 
-    /*
-     * Prepare broadcast link message for reliable transmission,
+    /* Prepare broadcast link message for reliable transmission,
      * if first time trying to send it;
      * preparation is skipped for broadcast link protocol messages
      * since they are sent in an unreliable manner and don't need it
@@ -611,30 +610,43 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
     for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
         struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
         struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
+        struct tipc_bearer *b = p;
+        struct sk_buff *tbuf;
 
         if (!p)
-            break;  /* no more bearers to try */
+            break;  /* No more bearers to try */
+
+        if (tipc_bearer_blocked(p)) {
+            if (!s || tipc_bearer_blocked(s))
+                continue;   /* Can't use either bearer */
+            b = s;
+        }
 
-        tipc_nmap_diff(&bcbearer->remains, &p->nodes, &bcbearer->remains_new);
+        tipc_nmap_diff(&bcbearer->remains, &b->nodes,
+                   &bcbearer->remains_new);
         if (bcbearer->remains_new.count == bcbearer->remains.count)
-            continue;   /* bearer pair doesn't add anything */
+            continue;   /* Nothing added by bearer pair */
 
-        if (!tipc_bearer_blocked(p))
-            tipc_bearer_send(p, buf, &p->bcast_addr);
-        else if (s && !tipc_bearer_blocked(s))
-            /* unable to send on primary bearer */
-            tipc_bearer_send(s, buf, &s->bcast_addr);
-        else
-            /* unable to send on either bearer */
-            continue;
+        if (bp_index == 0) {
+            /* Use original buffer for first bearer */
+            tipc_bearer_send(b, buf, &b->bcast_addr);
+        } else {
+            /* Avoid concurrent buffer access */
+            tbuf = pskb_copy(buf, GFP_ATOMIC);
+            if (!tbuf)
+                break;
+            tipc_bearer_send(b, tbuf, &b->bcast_addr);
+            kfree_skb(tbuf); /* Bearer keeps a clone */
+        }
 
+        /* Swap bearers for next packet */
         if (s) {
             bcbearer->bpairs[bp_index].primary = s;
             bcbearer->bpairs[bp_index].secondary = p;
         }
 
         if (bcbearer->remains_new.count == 0)
-            break;  /* all targets reached */
+            break;  /* All targets reached */
 
         bcbearer->remains = bcbearer->remains_new;
     }
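
Why pskb_copy() rather than reusing buf: a bearer may modify the header of the buffer it is handed, and a plain clone would still share the data area with the original, so sending one skb down several bearers races. The first bearer may use the original; each further bearer gets a private copy. A standalone sketch of that ownership rule (buf_copy() is a hypothetical analogue of pskb_copy()):

    #include <stdlib.h>
    #include <string.h>

    struct buf { unsigned char data[64]; size_t len; };

    static struct buf *buf_copy(const struct buf *b)
    {
        struct buf *c = malloc(sizeof(*c));
        if (c)
            memcpy(c, b, sizeof(*b)); /* private, writable header copy */
        return c;
    }

    static void bearer_send(struct buf *b) { (void)b; /* may rewrite header */ }

    static int send_on_all(struct buf *orig, int nbearers)
    {
        for (int i = 0; i < nbearers; i++) {
            if (i == 0) {
                bearer_send(orig);             /* first sender: original buffer */
                continue;
            }
            struct buf *tbuf = buf_copy(orig); /* like pskb_copy(buf, GFP_ATOMIC) */
            if (!tbuf)
                return -1;
            bearer_send(tbuf);
            free(tbuf);                        /* sender keeps its own clone */
        }
        return 0;
    }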