Merge branch 's390-qeth-next'
Julian Wiedmann says:

====================
s390/qeth: updates 2019-08-23

please apply one more round of qeth patches. These implement support
for a bunch of TX-related features - namely TX NAPI, BQL and
xmit_more.

Note that this includes two qdio patches which lay the necessary
groundwork, and have been acked by Vasily.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit ace4cedea9
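The series wires qeth's IQD TX path into three generic stack facilities: TX NAPI (completions handled in a softirq poll loop instead of the qdio tasklet), BQL (byte queue limits, so the stack can bound the per-txq in-flight data) and xmit_more (deferring the device kick while further skbs are pending). As a reader aid, here is a hedged, generic sketch of how BQL and xmit_more pair up in an .ndo_start_xmit; the mydrv_* helpers are illustrative placeholders, not qeth code - the real wiring is in __qeth_xmit() and qeth_tx_poll() below:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t mydrv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq;
	unsigned int bytes = skb->len;
	bool flush;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	mydrv_map_into_tx_ring(dev, skb);	/* placeholder: fill a HW buffer */

	/* BQL accounting; returns true when the device must be kicked now
	 * (no further skb pending, or the queue was stopped):
	 */
	flush = __netdev_tx_sent_queue(txq, bytes, netdev_xmit_more());
	if (flush)
		mydrv_kick_hw(dev);		/* placeholder: ring the doorbell */

	return NETDEV_TX_OK;
}

/* Completion side (e.g. from a NAPI poll): release the accounted work so
 * BQL can adapt its limit and the txq can be woken:
 */
static void mydrv_tx_completed(struct netdev_queue *txq, unsigned int pkts,
			       unsigned int bytes)
{
	netdev_tx_completed_queue(txq, pkts, bytes);
}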
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -16,6 +16,7 @@
 #define QDIO_MAX_QUEUES_PER_IRQ		4
 #define QDIO_MAX_BUFFERS_PER_Q		128
 #define QDIO_MAX_BUFFERS_MASK		(QDIO_MAX_BUFFERS_PER_Q - 1)
+#define QDIO_BUFNR(num)			((num) & QDIO_MAX_BUFFERS_MASK)
 #define QDIO_MAX_ELEMENTS_PER_BUFFER	16
 #define QDIO_SBAL_SIZE			256
 
@@ -359,7 +360,7 @@ struct qdio_initialize {
 	qdio_handler_t *output_handler;
 	void (**queue_start_poll_array) (struct ccw_device *, int,
 					 unsigned long);
-	int scan_threshold;
+	unsigned int scan_threshold;
 	unsigned long int_parm;
 	struct qdio_buffer **input_sbal_addr_array;
 	struct qdio_buffer **output_sbal_addr_array;
@@ -416,6 +417,9 @@ extern int do_QDIO(struct ccw_device *, unsigned int, int, unsigned int,
 extern int qdio_start_irq(struct ccw_device *, int);
 extern int qdio_stop_irq(struct ccw_device *, int);
 extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *);
+extern int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr,
+			      bool is_input, unsigned int *bufnr,
+			      unsigned int *error);
 extern int qdio_shutdown(struct ccw_device *, int);
 extern int qdio_free(struct ccw_device *);
 extern int qdio_get_ssqd_desc(struct ccw_device *, struct qdio_ssqd_desc *);
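qdio_inspect_queue() is the groundwork half of the series: a driver that passes scan_threshold = 0 opts out of qdio's internal Output Queue scanning (see the qdio_main.c changes below) and scans for completed buffers itself. A hedged usage sketch, with MY_TXQ and my_complete_buffer() as placeholders (qeth's real caller is qeth_tx_poll() in qeth_core_main.c further down):

/* Drain completed buffers from Output Queue MY_TXQ; returns how many
 * completed, or < 0 on error. Sketch only, not qeth code.
 */
static int my_drain_txq(struct ccw_device *cdev)
{
	unsigned int bufnr, error;
	int i, completed;

	completed = qdio_inspect_queue(cdev, MY_TXQ, false, &bufnr, &error);
	for (i = 0; i < completed; i++)
		/* buffer numbers wrap at 128, hence the new QDIO_BUFNR(): */
		my_complete_buffer(QDIO_BUFNR(bufnr + i), error);

	return completed;
}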
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -206,8 +206,6 @@ struct qdio_output_q {
 	struct qdio_outbuf_state *sbal_state;
 	/* timer to check for more outbound work */
 	struct timer_list timer;
-	/* used SBALs before tasklet schedule */
-	int scan_threshold;
 };
 
 /*
@@ -295,6 +293,7 @@ struct qdio_irq {
 	struct qdio_ssqd_desc ssqd_desc;
 	void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
 
+	unsigned int scan_threshold;	/* used SBALs before tasklet schedule */
 	int perf_stat_enabled;
 
 	struct qdr *qdr;
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -647,8 +647,6 @@ static void qdio_kick_handler(struct qdio_q *q, unsigned int count)
 		qperf_inc(q, outbound_handler);
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
 			      start, count);
-		if (q->u.out.use_cq)
-			qdio_handle_aobs(q, start, count);
 	}
 
 	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
@@ -774,8 +772,11 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start)
 
 	count = get_outbound_buffer_frontier(q, start);
 
-	if (count)
+	if (count) {
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
+		if (q->u.out.use_cq)
+			qdio_handle_aobs(q, start, count);
+	}
 
 	return count;
 }
@@ -879,7 +880,7 @@ static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
 	struct qdio_q *out;
 	int i;
 
-	if (!pci_out_supported(irq))
+	if (!pci_out_supported(irq) || !irq->scan_threshold)
 		return;
 
 	for_each_output_queue(irq, out, i)
@@ -972,7 +973,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
 		}
 	}
 
-	if (!pci_out_supported(irq_ptr))
+	if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold)
 		return;
 
 	for_each_output_queue(irq_ptr, q, i) {
@@ -1527,6 +1528,7 @@ set:
 static int handle_outbound(struct qdio_q *q, unsigned int callflags,
 			   int bufnr, int count)
 {
+	const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
 	unsigned char state = 0;
 	int used, rc = 0;
 
@@ -1565,8 +1567,12 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
 		rc = qdio_kick_outbound_q(q, 0);
 	}
 
+	/* Let drivers implement their own completion scanning: */
+	if (!scan_threshold)
+		return rc;
+
 	/* in case of SIGA errors we must process the error immediately */
-	if (used >= q->u.out.scan_threshold || rc)
+	if (used >= scan_threshold || rc)
 		qdio_tasklet_schedule(q);
 	else
 		/* free the SBALs in case of no further traffic */
@@ -1655,6 +1661,44 @@ rescan:
 }
 EXPORT_SYMBOL(qdio_start_irq);
 
+static int __qdio_inspect_queue(struct qdio_q *q, unsigned int *bufnr,
+				unsigned int *error)
+{
+	unsigned int start = q->first_to_check;
+	int count;
+
+	count = q->is_input_q ? qdio_inbound_q_moved(q, start) :
+				qdio_outbound_q_moved(q, start);
+	if (count == 0)
+		return 0;
+
+	*bufnr = start;
+	*error = q->qdio_error;
+
+	/* for the next time */
+	q->first_to_check = add_buf(start, count);
+	q->qdio_error = 0;
+
+	return count;
+}
+
+int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr, bool is_input,
+		       unsigned int *bufnr, unsigned int *error)
+{
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+	struct qdio_q *q;
+
+	if (!irq_ptr)
+		return -ENODEV;
+	q = is_input ? irq_ptr->input_qs[nr] : irq_ptr->output_qs[nr];
+
+	if (need_siga_sync(q))
+		qdio_siga_sync_q(q);
+
+	return __qdio_inspect_queue(q, bufnr, error);
+}
+EXPORT_SYMBOL_GPL(qdio_inspect_queue);
+
 /**
  * qdio_get_next_buffers - process input buffers
  * @cdev: associated ccw_device for the qdio subchannel
@@ -1672,13 +1716,10 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
 {
 	struct qdio_q *q;
 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
-	unsigned int start;
-	int count;
 
 	if (!irq_ptr)
 		return -ENODEV;
 	q = irq_ptr->input_qs[nr];
-	start = q->first_to_check;
 
 	/*
	 * Cannot rely on automatic sync after interrupt since queues may
@@ -1689,25 +1730,11 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
 
 	qdio_check_outbound_pci_queues(irq_ptr);
 
-	count = qdio_inbound_q_moved(q, start);
-	if (count == 0)
-		return 0;
-
-	start = add_buf(start, count);
-	q->first_to_check = start;
-
 	/* Note: upper-layer MUST stop processing immediately here ... */
 	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
 		return -EIO;
 
-	*bufnr = q->first_to_kick;
-	*error = q->qdio_error;
-
-	/* for the next time */
-	q->first_to_kick = add_buf(q->first_to_kick, count);
-	q->qdio_error = 0;
-
-	return count;
+	return __qdio_inspect_queue(q, bufnr, error);
 }
 EXPORT_SYMBOL(qdio_get_next_buffers);
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -248,7 +248,6 @@ static void setup_queues(struct qdio_irq *irq_ptr,
 		output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;
 
 		q->is_input_q = 0;
-		q->u.out.scan_threshold = qdio_init->scan_threshold;
 		setup_storage_lists(q, irq_ptr, output_sbal_array, i);
 		output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
 
@@ -474,6 +473,7 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
 	irq_ptr->nr_input_qs = init_data->no_input_qs;
 	irq_ptr->nr_output_qs = init_data->no_output_qs;
 	irq_ptr->cdev = init_data->cdev;
+	irq_ptr->scan_threshold = init_data->scan_threshold;
 	ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid);
 	setup_queues(irq_ptr, init_data);
 
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -22,6 +22,7 @@
 #include <linux/hashtable.h>
 #include <linux/ip.h>
 #include <linux/refcount.h>
+#include <linux/timer.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
 
@@ -30,6 +31,7 @@
 #include <net/ipv6.h>
 #include <net/if_inet6.h>
 #include <net/addrconf.h>
+#include <net/sch_generic.h>
 #include <net/tcp.h>
 
 #include <asm/debug.h>
@@ -376,6 +378,28 @@ enum qeth_header_ids {
 #define QETH_HDR_EXT_CSUM_TRANSP_REQ  0x20
 #define QETH_HDR_EXT_UDP	      0x40 /*bit off for TCP*/
 
+static inline bool qeth_l2_same_vlan(struct qeth_hdr_layer2 *h1,
+				     struct qeth_hdr_layer2 *h2)
+{
+	return !((h1->flags[2] ^ h2->flags[2]) & QETH_LAYER2_FLAG_VLAN) &&
+	       h1->vlan_id == h2->vlan_id;
+}
+
+static inline bool qeth_l3_iqd_same_vlan(struct qeth_hdr_layer3 *h1,
+					 struct qeth_hdr_layer3 *h2)
+{
+	return !((h1->ext_flags ^ h2->ext_flags) & QETH_HDR_EXT_VLAN_FRAME) &&
+	       h1->vlan_id == h2->vlan_id;
+}
+
+static inline bool qeth_l3_same_next_hop(struct qeth_hdr_layer3 *h1,
+					 struct qeth_hdr_layer3 *h2)
+{
+	return !((h1->flags ^ h2->flags) & QETH_HDR_IPV6) &&
+	       ipv6_addr_equal(&h1->next_hop.ipv6_addr,
+			       &h2->next_hop.ipv6_addr);
+}
+
 enum qeth_qdio_info_states {
 	QETH_QDIO_UNINITIALIZED,
 	QETH_QDIO_ALLOCATED,
@@ -424,6 +448,7 @@ struct qeth_qdio_out_buffer {
 	struct qdio_buffer *buffer;
 	atomic_t state;
 	int next_element_to_fill;
+	unsigned int bytes;
 	struct sk_buff_head skb_list;
 	int is_header[QDIO_MAX_ELEMENTS_PER_BUFFER];
 
@@ -473,6 +498,8 @@ struct qeth_out_q_stats {
 	u64 tso_bytes;
 	u64 packing_mode_switch;
 	u64 stopped;
+	u64 completion_yield;
+	u64 completion_timer;
 
 	/* rtnl_link_stats64 */
 	u64 tx_packets;
@@ -481,6 +508,8 @@ struct qeth_out_q_stats {
 	u64 tx_dropped;
 };
 
+#define QETH_TX_TIMER_USECS		500
+
 struct qeth_qdio_out_q {
 	struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
 	struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
@@ -499,13 +528,36 @@ struct qeth_qdio_out_q {
 	atomic_t used_buffers;
 	/* indicates whether PCI flag must be set (or if one is outstanding) */
 	atomic_t set_pci_flags_count;
+	struct napi_struct napi;
+	struct timer_list timer;
+	struct qeth_hdr *prev_hdr;
+	u8 bulk_start;
 };
 
 #define qeth_for_each_output_queue(card, q, i)		\
	for (i = 0; i < card->qdio.no_out_queues &&	\
		    (q = card->qdio.out_qs[i]); i++)
 
+#define	qeth_napi_to_out_queue(n) container_of(n, struct qeth_qdio_out_q, napi)
+
+static inline void qeth_tx_arm_timer(struct qeth_qdio_out_q *queue)
+{
+	if (timer_pending(&queue->timer))
+		return;
+	mod_timer(&queue->timer, usecs_to_jiffies(QETH_TX_TIMER_USECS) +
+				 jiffies);
+}
+
+static inline bool qeth_out_queue_is_full(struct qeth_qdio_out_q *queue)
+{
+	return atomic_read(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q;
+}
+
+static inline bool qeth_out_queue_is_empty(struct qeth_qdio_out_q *queue)
+{
+	return atomic_read(&queue->used_buffers) == 0;
+}
+
 struct qeth_qdio_info {
 	atomic_t state;
 	/* input */
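The napi/timer pair added to struct qeth_qdio_out_q above forms a completion backstop: the queue's NAPI instance scans for TX completions, and when a poll run finds nothing outstanding it arms the 500 usec timer, whose handler simply reschedules the NAPI instance. A condensed, simplified illustration of that interlock (the real versions are qeth_tx_completion_timer() and qeth_tx_poll() in the qeth_core_main.c hunks below; the my_* scan helpers are placeholders, not qeth code):

static void tx_completion_timer(struct timer_list *timer)
{
	struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);

	napi_schedule(&queue->napi);	/* resume completion processing */
}

static int tx_poll(struct napi_struct *napi, int budget)
{
	struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
	int completed = my_scan_for_completions(queue);	/* placeholder */

	if (completed <= 0) {
		/* Arm the backstop only if NAPI really went idle: */
		if (napi_complete_done(napi, 0))
			qeth_tx_arm_timer(queue);
		return 0;
	}

	my_process_completions(queue, completed);	/* placeholder */
	return 0;
}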
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -71,7 +71,8 @@ static void qeth_free_qdio_queues(struct qeth_card *card);
 static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
 		struct qeth_qdio_out_buffer *buf,
 		enum iucv_tx_notify notification);
-static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
+static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
+				 int budget);
 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
 
 static void qeth_close_dev_handler(struct work_struct *work)
@@ -411,7 +412,7 @@ static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
 			/* release here to avoid interleaving between
			   outbound tasklet and inbound tasklet
			   regarding notifications and lifecycle */
-			qeth_release_skbs(c);
+			qeth_tx_complete_buf(c, forced_cleanup, 0);
 
 			c = f->next_pending;
 			WARN_ON_ONCE(head->next_pending != f);
@@ -1077,22 +1078,52 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
 	}
 }
 
-static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
+static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
+				 int budget)
 {
+	struct qeth_qdio_out_q *queue = buf->q;
 	struct sk_buff *skb;
 
 	/* release may never happen from within CQ tasklet scope */
 	WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
 
 	if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
-		qeth_notify_skbs(buf->q, buf, TX_NOTIFY_GENERALERROR);
+		qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR);
 
-	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL)
-		consume_skb(skb);
+	/* Empty buffer? */
+	if (buf->next_element_to_fill == 0)
+		return;
+
+	QETH_TXQ_STAT_INC(queue, bufs);
+	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
+	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
+		unsigned int bytes = qdisc_pkt_len(skb);
+		bool is_tso = skb_is_gso(skb);
+		unsigned int packets;
+
+		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
+		if (error) {
+			QETH_TXQ_STAT_ADD(queue, tx_errors, packets);
+		} else {
+			QETH_TXQ_STAT_ADD(queue, tx_packets, packets);
+			QETH_TXQ_STAT_ADD(queue, tx_bytes, bytes);
+			if (skb->ip_summed == CHECKSUM_PARTIAL)
+				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
+			if (skb_is_nonlinear(skb))
+				QETH_TXQ_STAT_INC(queue, skbs_sg);
+			if (is_tso) {
+				QETH_TXQ_STAT_INC(queue, skbs_tso);
+				QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
+			}
+		}
+
+		napi_consume_skb(skb, budget);
+	}
 }
 
 static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
-				     struct qeth_qdio_out_buffer *buf)
+				     struct qeth_qdio_out_buffer *buf,
+				     bool error, int budget)
 {
 	int i;
 
@@ -1100,7 +1131,7 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
 	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
 		atomic_dec(&queue->set_pci_flags_count);
 
-	qeth_release_skbs(buf);
+	qeth_tx_complete_buf(buf, error, budget);
 
 	for (i = 0; i < queue->max_elements; ++i) {
 		if (buf->buffer->element[i].addr && buf->is_header[i])
@@ -1111,6 +1142,7 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
 
 	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
 	buf->next_element_to_fill = 0;
+	buf->bytes = 0;
 	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
 }
 
@@ -1122,7 +1154,7 @@ static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
 		if (!q->bufs[j])
 			continue;
 		qeth_cleanup_handled_pending(q, j, 1);
-		qeth_clear_output_buffer(q, q->bufs[j]);
+		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
 		if (free) {
 			kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
 			q->bufs[j] = NULL;
@@ -2255,6 +2287,14 @@ static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
 	return q;
 }
 
+static void qeth_tx_completion_timer(struct timer_list *timer)
+{
+	struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);
+
+	napi_schedule(&queue->napi);
+	QETH_TXQ_STAT_INC(queue, completion_timer);
+}
+
 static int qeth_alloc_qdio_queues(struct qeth_card *card)
 {
 	int i, j;
@@ -2276,17 +2316,22 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
 
 	/* outbound */
 	for (i = 0; i < card->qdio.no_out_queues; ++i) {
-		card->qdio.out_qs[i] = qeth_alloc_output_queue();
-		if (!card->qdio.out_qs[i])
+		struct qeth_qdio_out_q *queue;
+
+		queue = qeth_alloc_output_queue();
+		if (!queue)
 			goto out_freeoutq;
 		QETH_CARD_TEXT_(card, 2, "outq %i", i);
-		QETH_CARD_HEX(card, 2, &card->qdio.out_qs[i], sizeof(void *));
-		card->qdio.out_qs[i]->card = card;
-		card->qdio.out_qs[i]->queue_no = i;
+		QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
+		card->qdio.out_qs[i] = queue;
+		queue->card = card;
+		queue->queue_no = i;
+		timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
 
 		/* give outbound qeth_qdio_buffers their qdio_buffers */
 		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
-			WARN_ON(card->qdio.out_qs[i]->bufs[j] != NULL);
-			if (qeth_init_qdio_out_buf(card->qdio.out_qs[i], j))
+			WARN_ON(queue->bufs[j]);
+			if (qeth_init_qdio_out_buf(queue, j))
 				goto out_freeoutqbufs;
 		}
 	}
@@ -2626,9 +2671,12 @@ int qeth_init_qdio_queues(struct qeth_card *card)
 		queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
 		queue->next_buf_to_fill = 0;
 		queue->do_pack = 0;
+		queue->prev_hdr = NULL;
+		queue->bulk_start = 0;
 		atomic_set(&queue->used_buffers, 0);
 		atomic_set(&queue->set_pci_flags_count, 0);
 		atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+		netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
 	}
 	return 0;
 }
@@ -3197,6 +3245,7 @@ static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
 static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
			       int count)
 {
+	struct qeth_card *card = queue->card;
 	struct qeth_qdio_out_buffer *buf;
 	int rc;
 	int i;
@@ -3240,14 +3289,17 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
 		}
 	}
 
-	QETH_TXQ_STAT_ADD(queue, bufs, count);
 	qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
 	if (atomic_read(&queue->set_pci_flags_count))
 		qdio_flags |= QDIO_FLAG_PCI_OUT;
 	rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
		     queue->queue_no, index, count);
+
+	/* Fake the TX completion interrupt: */
+	if (IS_IQD(card))
+		napi_schedule(&queue->napi);
+
 	if (rc) {
-		QETH_TXQ_STAT_ADD(queue, tx_errors, count);
 		/* ignore temporary SIGA errors without busy condition */
 		if (rc == -ENOBUFS)
 			return;
@@ -3264,6 +3316,14 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
 	}
 }
 
+static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
+{
+	qeth_flush_buffers(queue, queue->bulk_start, 1);
+
+	queue->bulk_start = QDIO_BUFNR(queue->bulk_start + 1);
+	queue->prev_hdr = NULL;
+}
+
 static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
 {
 	int index;
@@ -3425,48 +3485,12 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
 		int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
 		buffer = queue->bufs[bidx];
 		qeth_handle_send_error(card, buffer, qdio_error);
-
-		if (queue->bufstates &&
-		    (queue->bufstates[bidx].flags &
-		     QDIO_OUTBUF_STATE_FLAG_PENDING) != 0) {
-			WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
-
-			if (atomic_cmpxchg(&buffer->state,
-					   QETH_QDIO_BUF_PRIMED,
-					   QETH_QDIO_BUF_PENDING) ==
-			    QETH_QDIO_BUF_PRIMED) {
-				qeth_notify_skbs(queue, buffer,
-						 TX_NOTIFY_PENDING);
-			}
-			QETH_CARD_TEXT_(queue->card, 5, "pel%d", bidx);
-
-			/* prepare the queue slot for re-use: */
-			qeth_scrub_qdio_buffer(buffer->buffer,
-					       queue->max_elements);
-			if (qeth_init_qdio_out_buf(queue, bidx)) {
-				QETH_CARD_TEXT(card, 2, "outofbuf");
-				qeth_schedule_recovery(card);
-			}
-		} else {
-			if (card->options.cq == QETH_CQ_ENABLED) {
-				enum iucv_tx_notify n;
-
-				n = qeth_compute_cq_notification(
-					buffer->buffer->element[15].sflags, 0);
-				qeth_notify_skbs(queue, buffer, n);
-			}
-
-			qeth_clear_output_buffer(queue, buffer);
-		}
 		qeth_cleanup_handled_pending(queue, bidx, 0);
+		qeth_clear_output_buffer(queue, buffer, qdio_error, 0);
 	}
-	atomic_sub(count, &queue->used_buffers);
-	/* check if we need to do something on this outbound queue */
-	if (!IS_IQD(card))
-		qeth_check_outbound_queue(queue);
 
-	if (IS_IQD(card))
-		__queue = qeth_iqd_translate_txq(dev, __queue);
+	atomic_sub(count, &queue->used_buffers);
+	qeth_check_outbound_queue(queue);
+
 	txq = netdev_get_tx_queue(dev, __queue);
 	/* xmit may have observed the full-condition, but not yet stopped the
	 * txq. In which case the code below won't trigger. So before returning,
@@ -3655,9 +3679,32 @@ check_layout:
 	return 0;
 }
 
-static void __qeth_fill_buffer(struct sk_buff *skb,
-			       struct qeth_qdio_out_buffer *buf,
-			       bool is_first_elem, unsigned int offset)
+static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
+			      struct qeth_qdio_out_buffer *buffer,
+			      struct sk_buff *curr_skb,
+			      struct qeth_hdr *curr_hdr)
+{
+	struct qeth_hdr *prev_hdr = queue->prev_hdr;
+
+	if (!prev_hdr)
+		return true;
+
+	/* All packets must have the same target: */
+	if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
+		struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);
+
+		return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
+					eth_hdr(curr_skb)->h_dest) &&
+		       qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
+	}
+
+	return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
+	       qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
+}
+
+static unsigned int __qeth_fill_buffer(struct sk_buff *skb,
+				       struct qeth_qdio_out_buffer *buf,
+				       bool is_first_elem, unsigned int offset)
 {
 	struct qdio_buffer *buffer = buf->buffer;
 	int element = buf->next_element_to_fill;
@@ -3714,24 +3761,21 @@ static void __qeth_fill_buffer(struct sk_buff *skb,
 	if (buffer->element[element - 1].eflags)
 		buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
 	buf->next_element_to_fill = element;
+	return element;
 }
 
 /**
  * qeth_fill_buffer() - map skb into an output buffer
- * @queue:	QDIO queue to submit the buffer on
  * @buf:	buffer to transport the skb
  * @skb:	skb to map into the buffer
  * @hdr:	qeth_hdr for this skb. Either at skb->data, or allocated
  *		from qeth_core_header_cache.
  * @offset:	when mapping the skb, start at skb->data + offset
  * @hd_len:	if > 0, build a dedicated header element of this size
- * @flush:	Prepare the buffer to be flushed, regardless of its fill level.
  */
-static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
-			    struct qeth_qdio_out_buffer *buf,
-			    struct sk_buff *skb, struct qeth_hdr *hdr,
-			    unsigned int offset, unsigned int hd_len,
-			    bool flush)
+static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
+				     struct sk_buff *skb, struct qeth_hdr *hdr,
+				     unsigned int offset, unsigned int hd_len)
 {
 	struct qdio_buffer *buffer = buf->buffer;
 	bool is_first_elem = true;
@@ -3751,35 +3795,22 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
 		buf->next_element_to_fill++;
 	}
 
-	__qeth_fill_buffer(skb, buf, is_first_elem, offset);
-
-	if (!queue->do_pack) {
-		QETH_CARD_TEXT(queue->card, 6, "fillbfnp");
-	} else {
-		QETH_CARD_TEXT(queue->card, 6, "fillbfpa");
-
-		QETH_TXQ_STAT_INC(queue, skbs_pack);
-		/* If the buffer still has free elements, keep using it. */
-		if (!flush &&
-		    buf->next_element_to_fill < queue->max_elements)
-			return 0;
-	}
-
-	/* flush out the buffer */
-	atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
-	queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
-				  QDIO_MAX_BUFFERS_PER_Q;
-	return 1;
+	return __qeth_fill_buffer(skb, buf, is_first_elem, offset);
 }
 
-static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue,
-				    struct sk_buff *skb, struct qeth_hdr *hdr,
-				    unsigned int offset, unsigned int hd_len)
+static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
+		       struct sk_buff *skb, unsigned int elements,
+		       struct qeth_hdr *hdr, unsigned int offset,
+		       unsigned int hd_len)
 {
-	int index = queue->next_buf_to_fill;
-	struct qeth_qdio_out_buffer *buffer = queue->bufs[index];
+	struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
+	unsigned int bytes = qdisc_pkt_len(skb);
+	unsigned int next_element;
 	struct netdev_queue *txq;
 	bool stopped = false;
+	bool flush;
+
+	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
 
 	/* Just a sanity check, the wake/stop logic should ensure that we always
	 * get a free buffer.
	 */
@@ -3787,9 +3818,19 @@ static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue,
 	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
 		return -EBUSY;
 
-	txq = netdev_get_tx_queue(queue->card->dev, skb_get_queue_mapping(skb));
+	if ((buffer->next_element_to_fill + elements > queue->max_elements) ||
+	    !qeth_iqd_may_bulk(queue, buffer, skb, hdr)) {
+		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
+		qeth_flush_queue(queue);
+		buffer = queue->bufs[queue->bulk_start];
 
-	if (atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
+		/* Sanity-check again: */
+		if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
+			return -EBUSY;
+	}
+
+	if (buffer->next_element_to_fill == 0 &&
+	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
 		/* If a TX completion happens right _here_ and misses to wake
		 * the txq, then our re-check below will catch the race.
		 */
@@ -3798,8 +3839,17 @@ static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue,
 		stopped = true;
 	}
 
-	qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len, stopped);
-	qeth_flush_buffers(queue, index, 1);
+	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
+	buffer->bytes += bytes;
+	queue->prev_hdr = hdr;
+
+	flush = __netdev_tx_sent_queue(txq, bytes,
+				       !stopped && netdev_xmit_more());
+
+	if (flush || next_element >= queue->max_elements) {
+		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
+		qeth_flush_queue(queue);
+	}
 
 	if (stopped && !qeth_out_queue_is_full(queue))
 		netif_tx_start_queue(txq);
@@ -3812,6 +3862,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
			int elements_needed)
 {
 	struct qeth_qdio_out_buffer *buffer;
+	unsigned int next_element;
 	struct netdev_queue *txq;
 	bool stopped = false;
 	int start_index;
@@ -3874,8 +3925,17 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
 		stopped = true;
 	}
 
-	flush_count += qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len,
-					stopped);
+	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
+
+	if (queue->do_pack)
+		QETH_TXQ_STAT_INC(queue, skbs_pack);
+	if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
+		flush_count++;
+		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
+		queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
+					  QDIO_MAX_BUFFERS_PER_Q;
+	}
 
 	if (flush_count)
 		qeth_flush_buffers(queue, start_index, flush_count);
 	else if (!atomic_read(&queue->set_pci_flags_count))
@@ -3942,7 +4002,6 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
 	unsigned int hd_len = 0;
 	unsigned int elements;
 	int push_len, rc;
-	bool is_sg;
 
 	if (is_tso) {
 		hw_hdr_len = sizeof(struct qeth_hdr_tso);
@@ -3971,10 +4030,9 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
 		qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
				  frame_len - proto_len, skb, proto_len);
 
-	is_sg = skb_is_nonlinear(skb);
 	if (IS_IQD(card)) {
-		rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
-					      hd_len);
+		rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
+				 hd_len);
 	} else {
 		/* TODO: drop skb_orphan() once TX completion is fast enough */
 		skb_orphan(skb);
@@ -3982,18 +4040,9 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
				      hd_len, elements);
 	}
 
-	if (!rc) {
-		QETH_TXQ_STAT_ADD(queue, buf_elements, elements);
-		if (is_sg)
-			QETH_TXQ_STAT_INC(queue, skbs_sg);
-		if (is_tso) {
-			QETH_TXQ_STAT_INC(queue, skbs_tso);
-			QETH_TXQ_STAT_ADD(queue, tso_bytes, frame_len);
-		}
-	} else {
-		if (!push_len)
-			kmem_cache_free(qeth_core_header_cache, hdr);
-	}
+	if (rc && !push_len)
+		kmem_cache_free(qeth_core_header_cache, hdr);
+
 	return rc;
 }
 EXPORT_SYMBOL_GPL(qeth_xmit);
@@ -4724,7 +4773,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
 	init_data.input_sbal_addr_array  = in_sbal_ptrs;
 	init_data.output_sbal_addr_array = out_sbal_ptrs;
 	init_data.output_sbal_state_array = card->qdio.out_bufstates;
-	init_data.scan_threshold = IS_IQD(card) ? 1 : 32;
+	init_data.scan_threshold = IS_IQD(card) ? 0 : 32;
 
 	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
		QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
@@ -5138,6 +5187,107 @@ out:
 }
 EXPORT_SYMBOL_GPL(qeth_poll);
 
+static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
+				 unsigned int bidx, bool error, int budget)
+{
+	struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
+	u8 sflags = buffer->buffer->element[15].sflags;
+	struct qeth_card *card = queue->card;
+
+	if (queue->bufstates && (queue->bufstates[bidx].flags &
+				 QDIO_OUTBUF_STATE_FLAG_PENDING)) {
+		WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
+
+		if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
+						   QETH_QDIO_BUF_PENDING) ==
+		    QETH_QDIO_BUF_PRIMED)
+			qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);
+
+		QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
+
+		/* prepare the queue slot for re-use: */
+		qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
+		if (qeth_init_qdio_out_buf(queue, bidx)) {
+			QETH_CARD_TEXT(card, 2, "outofbuf");
+			qeth_schedule_recovery(card);
+		}
+
+		return;
+	}
+
+	if (card->options.cq == QETH_CQ_ENABLED)
+		qeth_notify_skbs(queue, buffer,
+				 qeth_compute_cq_notification(sflags, 0));
+	qeth_clear_output_buffer(queue, buffer, error, budget);
+}
+
+static int qeth_tx_poll(struct napi_struct *napi, int budget)
+{
+	struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
+	unsigned int queue_no = queue->queue_no;
+	struct qeth_card *card = queue->card;
+	struct net_device *dev = card->dev;
+	unsigned int work_done = 0;
+	struct netdev_queue *txq;
+
+	txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
+
+	while (1) {
+		unsigned int start, error, i;
+		unsigned int packets = 0;
+		unsigned int bytes = 0;
+		int completed;
+
+		if (qeth_out_queue_is_empty(queue)) {
+			napi_complete(napi);
+			return 0;
+		}
+
+		/* Give the CPU a breather: */
+		if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
+			QETH_TXQ_STAT_INC(queue, completion_yield);
+			if (napi_complete_done(napi, 0))
+				napi_schedule(napi);
+			return 0;
+		}
+
+		completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false,
+					       &start, &error);
+		if (completed <= 0) {
+			/* Ensure we see TX completion for pending work: */
+			if (napi_complete_done(napi, 0))
+				qeth_tx_arm_timer(queue);
+			return 0;
+		}
+
+		for (i = start; i < start + completed; i++) {
+			struct qeth_qdio_out_buffer *buffer;
+			unsigned int bidx = QDIO_BUFNR(i);
+
+			buffer = queue->bufs[bidx];
+			packets += skb_queue_len(&buffer->skb_list);
+			bytes += buffer->bytes;
+
+			qeth_handle_send_error(card, buffer, error);
+			qeth_iqd_tx_complete(queue, bidx, error, budget);
+			qeth_cleanup_handled_pending(queue, bidx, false);
+		}
+
+		netdev_tx_completed_queue(txq, packets, bytes);
+		atomic_sub(completed, &queue->used_buffers);
+		work_done += completed;
+
+		/* xmit may have observed the full-condition, but not yet
+		 * stopped the txq. In which case the code below won't trigger.
+		 * So before returning, xmit will re-check the txq's fill level
+		 * and wake it up if needed.
+		 */
+		if (netif_tx_queue_stopped(txq) &&
+		    !qeth_out_queue_is_full(queue))
+			netif_tx_wake_queue(txq);
+	}
+}
+
 static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
 {
 	if (!cmd->hdr.return_code)
@@ -6084,6 +6234,17 @@ int qeth_open(struct net_device *dev)
 	napi_enable(&card->napi);
 	local_bh_disable();
 	napi_schedule(&card->napi);
+	if (IS_IQD(card)) {
+		struct qeth_qdio_out_q *queue;
+		unsigned int i;
+
+		qeth_for_each_output_queue(card, queue, i) {
+			netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
+					  QETH_NAPI_WEIGHT);
+			napi_enable(&queue->napi);
+			napi_schedule(&queue->napi);
+		}
+	}
 	/* kick-start the NAPI softirq: */
 	local_bh_enable();
 	return 0;
@@ -6095,7 +6256,26 @@ int qeth_stop(struct net_device *dev)
 	struct qeth_card *card = dev->ml_priv;
 
 	QETH_CARD_TEXT(card, 4, "qethstop");
-	netif_tx_disable(dev);
+	if (IS_IQD(card)) {
+		struct qeth_qdio_out_q *queue;
+		unsigned int i;
+
+		/* Quiesce the NAPI instances: */
+		qeth_for_each_output_queue(card, queue, i) {
+			napi_disable(&queue->napi);
+			del_timer_sync(&queue->timer);
+		}
+
+		/* Stop .ndo_start_xmit, might still access queue->napi. */
+		netif_tx_disable(dev);
+
+		/* Queues may get re-allocated, so remove the NAPIs here. */
+		qeth_for_each_output_queue(card, queue, i)
+			netif_napi_del(&queue->napi);
+	} else {
+		netif_tx_disable(dev);
+	}
 
 	napi_disable(&card->napi);
 	return 0;
 }
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -39,6 +39,8 @@ static const struct qeth_stats txq_stats[] = {
 	QETH_TXQ_STAT("TSO bytes", tso_bytes),
 	QETH_TXQ_STAT("Packing mode switches", packing_mode_switch),
 	QETH_TXQ_STAT("Queue stopped", stopped),
+	QETH_TXQ_STAT("Completion yield", completion_yield),
+	QETH_TXQ_STAT("Completion timer", completion_timer),
 };
 
 static const struct qeth_stats card_stats[] = {
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -175,10 +175,8 @@ static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue,
 		hdr->hdr.l2.id = QETH_HEADER_TYPE_L2_TSO;
 	} else {
 		hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
-		if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		if (skb->ip_summed == CHECKSUM_PARTIAL)
 			qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv);
-			QETH_TXQ_STAT_INC(queue, skbs_csum);
-		}
 	}
 
 	/* set byte byte 3 to casting flags */
@@ -588,9 +586,10 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
 	struct qeth_card *card = dev->ml_priv;
 	u16 txq = skb_get_queue_mapping(skb);
 	struct qeth_qdio_out_q *queue;
-	int tx_bytes = skb->len;
 	int rc;
 
+	if (!skb_is_gso(skb))
+		qdisc_skb_cb(skb)->pkt_len = skb->len;
 	if (IS_IQD(card))
 		txq = qeth_iqd_translate_txq(dev, txq);
 	queue = card->qdio.out_qs[txq];
@@ -601,11 +600,8 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
 	rc = qeth_xmit(card, skb, queue, qeth_get_ip_version(skb),
		       qeth_l2_fill_header);
 
-	if (!rc) {
-		QETH_TXQ_STAT_INC(queue, tx_packets);
-		QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes);
+	if (!rc)
 		return NETDEV_TX_OK;
-	}
 
 	QETH_TXQ_STAT_INC(queue, tx_dropped);
 	kfree_skb(skb);
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1957,7 +1957,6 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
 		/* some HW requires combined L3+L4 csum offload: */
 		if (ipv == 4)
 			hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_HDR_REQ;
-		QETH_TXQ_STAT_INC(queue, skbs_csum);
 	}
 }
 
@@ -2044,9 +2043,10 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
 	u16 txq = skb_get_queue_mapping(skb);
 	int ipv = qeth_get_ip_version(skb);
 	struct qeth_qdio_out_q *queue;
-	int tx_bytes = skb->len;
 	int rc;
 
+	if (!skb_is_gso(skb))
+		qdisc_skb_cb(skb)->pkt_len = skb->len;
 	if (IS_IQD(card)) {
 		queue = card->qdio.out_qs[qeth_iqd_translate_txq(dev, txq)];
 
@@ -2069,11 +2069,8 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
 	else
 		rc = qeth_xmit(card, skb, queue, ipv, qeth_l3_fill_header);
 
-	if (!rc) {
-		QETH_TXQ_STAT_INC(queue, tx_packets);
-		QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes);
+	if (!rc)
 		return NETDEV_TX_OK;
-	}
 
 tx_drop:
 	QETH_TXQ_STAT_INC(queue, tx_dropped);