From 7d9d60fd4ab69604015b094c07ad8490039bb2a4 Mon Sep 17 00:00:00 2001
From: Toshiaki Makita
Date: Mon, 23 Jul 2018 23:36:04 +0900
Subject: [PATCH 1/6] virtio_net: Fix inconsistent received bytes counter

When received packets are dropped in the virtio_net driver, the
received packets counter is incremented but the bytes counter is not.
As a result, if we drop all packets by XDP for instance, only received
packets are counted and bytes stays 0, which looks inconsistent.
IMHO received packets/bytes should be counted whenever packets are
produced by the hypervisor, which is what common NICs on physical
machines do. So fix the bytes counter.

Signed-off-by: Toshiaki Makita
Signed-off-by: David S. Miller
---
 drivers/net/virtio_net.c | 41 ++++++++++++++++++++++------------------
 1 file changed, 23 insertions(+), 18 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 2ff08bc103a9..abbd3bc83b62 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -586,7 +586,8 @@ static struct sk_buff *receive_small(struct net_device *dev,
                                      struct receive_queue *rq,
                                      void *buf, void *ctx,
                                      unsigned int len,
-                                     unsigned int *xdp_xmit)
+                                     unsigned int *xdp_xmit,
+                                     unsigned int *rbytes)
 {
         struct sk_buff *skb;
         struct bpf_prog *xdp_prog;
@@ -601,6 +602,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
         int err;
 
         len -= vi->hdr_len;
+        *rbytes += len;
 
         rcu_read_lock();
         xdp_prog = rcu_dereference(rq->xdp_prog);
@@ -705,11 +707,13 @@ static struct sk_buff *receive_big(struct net_device *dev,
                                    struct virtnet_info *vi,
                                    struct receive_queue *rq,
                                    void *buf,
-                                   unsigned int len)
+                                   unsigned int len,
+                                   unsigned int *rbytes)
 {
         struct page *page = buf;
         struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
 
+        *rbytes += len - vi->hdr_len;
         if (unlikely(!skb))
                 goto err;
@@ -727,7 +731,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                                          void *buf,
                                          void *ctx,
                                          unsigned int len,
-                                         unsigned int *xdp_xmit)
+                                         unsigned int *xdp_xmit,
+                                         unsigned int *rbytes)
 {
         struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
         u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
@@ -740,6 +745,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
         int err;
 
         head_skb = NULL;
+        *rbytes += len - vi->hdr_len;
 
         rcu_read_lock();
         xdp_prog = rcu_dereference(rq->xdp_prog);
@@ -877,6 +883,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                         goto err_buf;
                 }
 
+                *rbytes += len;
                 page = virt_to_head_page(buf);
 
                 truesize = mergeable_ctx_to_truesize(ctx);
@@ -932,6 +939,7 @@ err_skb:
                         dev->stats.rx_length_errors++;
                         break;
                 }
+                *rbytes += len;
                 page = virt_to_head_page(buf);
                 put_page(page);
         }
@@ -942,14 +950,13 @@ xdp_xmit:
         return NULL;
 }
 
-static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
-                       void *buf, unsigned int len, void **ctx,
-                       unsigned int *xdp_xmit)
+static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
+                        void *buf, unsigned int len, void **ctx,
+                        unsigned int *xdp_xmit, unsigned int *rbytes)
 {
         struct net_device *dev = vi->dev;
         struct sk_buff *skb;
         struct virtio_net_hdr_mrg_rxbuf *hdr;
-        int ret;
 
         if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
                 pr_debug("%s: short packet %i\n", dev->name, len);
@@ -961,23 +968,22 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
                 } else {
                         put_page(virt_to_head_page(buf));
                 }
-                return 0;
+                return;
         }
 
         if (vi->mergeable_rx_bufs)
-                skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit);
+                skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
+                                        rbytes);
         else if (vi->big_packets)
-                skb = receive_big(dev, vi, rq, buf, len);
+                skb = receive_big(dev, vi, rq, buf, len, rbytes);
         else
-                skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit);
+                skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, rbytes);
 
         if (unlikely(!skb))
-                return 0;
+                return;
 
         hdr = skb_vnet_hdr(skb);
 
-        ret = skb->len;
-
         if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
                 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -994,12 +1000,11 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
                  ntohs(skb->protocol), skb->len, skb->pkt_type);
 
         napi_gro_receive(&rq->napi, skb);
-        return ret;
+        return;
 
 frame_err:
         dev->stats.rx_frame_errors++;
         dev_kfree_skb(skb);
-        return 0;
 }
 
 /* Unlike mergeable buffers, all buffers are allocated to the
@@ -1249,13 +1254,13 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
 
                 while (received < budget &&
                        (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
-                        bytes += receive_buf(vi, rq, buf, len, ctx, xdp_xmit);
+                        receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &bytes);
                         received++;
                 }
         } else {
                 while (received < budget &&
                        (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
-                        bytes += receive_buf(vi, rq, buf, len, NULL, xdp_xmit);
+                        receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &bytes);
                         received++;
                 }
         }
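To see the intent of the fix in isolation, here is a minimal userspace
sketch (all names are illustrative stand-ins, not the driver's API): the
byte count is accumulated through an output parameter before the frame can
be dropped, so packets and bytes move together even when XDP drops
everything.

#include <stdbool.h>
#include <stdio.h>

#define HDR_LEN 12 /* stand-in for vi->hdr_len */

/* Returns true if the packet is passed up the stack, false if dropped. */
static bool toy_receive_buf(unsigned int len, bool xdp_drop,
                            unsigned int *rbytes)
{
        *rbytes += len - HDR_LEN;  /* counted even on the drop path */
        if (xdp_drop)
                return false;      /* e.g. XDP_DROP */
        return true;
}

int main(void)
{
        unsigned int packets = 0, bytes = 0;
        unsigned int lens[] = { 64, 128, 256 };

        for (int i = 0; i < 3; i++) {
                toy_receive_buf(lens[i] + HDR_LEN, true /* drop all */, &bytes);
                packets++;
        }
        /* Before the fix the driver's equivalent showed packets=3 bytes=0. */
        printf("packets=%u bytes=%u\n", packets, bytes);
        return 0;
}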
From a0929a44c2065da33c17b1b8015a88401d71ca7b Mon Sep 17 00:00:00 2001
From: Toshiaki Makita
Date: Mon, 23 Jul 2018 23:36:05 +0900
Subject: [PATCH 2/6] virtio_net: Use temporary storage for accounting rx stats

The purpose is to keep the receive_buf arguments simple when more
per-queue counter items are added later. Also, XDP_TX related sq
counters will be updated in the following changes, so create a
container struct virtnet_rx_stats which includes both rq and sq
statistics. For now it only covers rq stats.

Signed-off-by: Toshiaki Makita
Signed-off-by: David S. Miller
---
 drivers/net/virtio_net.c | 72 ++++++++++++++++++++++++----------------
 1 file changed, 44 insertions(+), 28 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index abbd3bc83b62..d03bfc4fce8e 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -84,14 +84,22 @@ struct virtnet_sq_stats {
         u64 bytes;
 };
 
-struct virtnet_rq_stats {
-        struct u64_stats_sync syncp;
+struct virtnet_rq_stat_items {
         u64 packets;
         u64 bytes;
 };
 
+struct virtnet_rq_stats {
+        struct u64_stats_sync syncp;
+        struct virtnet_rq_stat_items items;
+};
+
+struct virtnet_rx_stats {
+        struct virtnet_rq_stat_items rx;
+};
+
 #define VIRTNET_SQ_STAT(m)  offsetof(struct virtnet_sq_stats, m)
-#define VIRTNET_RQ_STAT(m)  offsetof(struct virtnet_rq_stats, m)
+#define VIRTNET_RQ_STAT(m)  offsetof(struct virtnet_rq_stat_items, m)
 
 static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
         { "packets",  VIRTNET_SQ_STAT(packets) },
@@ -587,7 +595,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
                                      void *buf, void *ctx,
                                      unsigned int len,
                                      unsigned int *xdp_xmit,
-                                     unsigned int *rbytes)
+                                     struct virtnet_rx_stats *stats)
 {
         struct sk_buff *skb;
         struct bpf_prog *xdp_prog;
@@ -602,7 +610,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
         int err;
 
         len -= vi->hdr_len;
-        *rbytes += len;
+        stats->rx.bytes += len;
 
         rcu_read_lock();
         xdp_prog = rcu_dereference(rq->xdp_prog);
@@ -708,12 +716,12 @@ static struct sk_buff *receive_big(struct net_device *dev,
                                    struct receive_queue *rq,
                                    void *buf,
                                    unsigned int len,
-                                   unsigned int *rbytes)
+                                   struct virtnet_rx_stats *stats)
 {
         struct page *page = buf;
         struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
 
-        *rbytes += len - vi->hdr_len;
+        stats->rx.bytes += len - vi->hdr_len;
         if (unlikely(!skb))
                 goto err;
@@ -732,7 +740,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                                          void *ctx,
                                          unsigned int len,
                                          unsigned int *xdp_xmit,
-                                         unsigned int *rbytes)
+                                         struct virtnet_rx_stats *stats)
 {
         struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
         u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
@@ -745,7 +753,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
         int err;
 
         head_skb = NULL;
-        *rbytes += len - vi->hdr_len;
+        stats->rx.bytes += len - vi->hdr_len;
 
         rcu_read_lock();
         xdp_prog = rcu_dereference(rq->xdp_prog);
@@ -883,7 +891,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                         goto err_buf;
                 }
 
-                *rbytes += len;
+                stats->rx.bytes += len;
                 page = virt_to_head_page(buf);
 
                 truesize = mergeable_ctx_to_truesize(ctx);
@@ -939,7 +947,7 @@ err_skb:
                         dev->stats.rx_length_errors++;
                         break;
                 }
-                *rbytes += len;
+                stats->rx.bytes += len;
                 page = virt_to_head_page(buf);
                 put_page(page);
         }
@@ -952,7 +960,8 @@ xdp_xmit:
 
 static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
                         void *buf, unsigned int len, void **ctx,
-                        unsigned int *xdp_xmit, unsigned int *rbytes)
+                        unsigned int *xdp_xmit,
+                        struct virtnet_rx_stats *stats)
 {
         struct net_device *dev = vi->dev;
         struct sk_buff *skb;
@@ -973,11 +982,11 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
 
         if (vi->mergeable_rx_bufs)
                 skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
-                                        rbytes);
+                                        stats);
         else if (vi->big_packets)
-                skb = receive_big(dev, vi, rq, buf, len, rbytes);
+                skb = receive_big(dev, vi, rq, buf, len, stats);
         else
-                skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, rbytes);
+                skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
 
         if (unlikely(!skb))
                 return;
@@ -1246,22 +1255,24 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
                             unsigned int *xdp_xmit)
 {
         struct virtnet_info *vi = rq->vq->vdev->priv;
-        unsigned int len, received = 0, bytes = 0;
+        struct virtnet_rx_stats stats = {};
+        unsigned int len;
         void *buf;
+        int i;
 
         if (!vi->big_packets || vi->mergeable_rx_bufs) {
                 void *ctx;
 
-                while (received < budget &&
+                while (stats.rx.packets < budget &&
                        (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
-                        receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &bytes);
-                        received++;
+                        receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
+                        stats.rx.packets++;
                 }
         } else {
-                while (received < budget &&
+                while (stats.rx.packets < budget &&
                        (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
-                        receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &bytes);
-                        received++;
+                        receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
+                        stats.rx.packets++;
                 }
         }
@@ -1271,11 +1282,16 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
         }
 
         u64_stats_update_begin(&rq->stats.syncp);
-        rq->stats.bytes += bytes;
-        rq->stats.packets += received;
+        for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
+                size_t offset = virtnet_rq_stats_desc[i].offset;
+                u64 *item;
+
+                item = (u64 *)((u8 *)&rq->stats.items + offset);
+                *item += *(u64 *)((u8 *)&stats.rx + offset);
+        }
         u64_stats_update_end(&rq->stats.syncp);
 
-        return received;
+        return stats.rx.packets;
 }
 
 static void free_old_xmit_skbs(struct send_queue *sq)
@@ -1628,8 +1644,8 @@ static void virtnet_stats(struct net_device *dev,
 
                 do {
                         start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
-                        rpackets = rq->stats.packets;
-                        rbytes = rq->stats.bytes;
+                        rpackets = rq->stats.items.packets;
+                        rbytes = rq->stats.items.bytes;
                 } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
 
                 tot->rx_packets += rpackets;
@@ -2019,7 +2035,7 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
         for (i = 0; i < vi->curr_queue_pairs; i++) {
                 struct receive_queue *rq = &vi->rq[i];
 
-                stats_base = (u8 *)&rq->stats;
+                stats_base = (u8 *)&rq->stats.items;
                 do {
                         start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
                         for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
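The for-loop added to virtnet_receive() above relies on
virtnet_rq_stats_desc describing each counter by name and offsetof()
offset. A self-contained sketch of that descriptor-table technique, with
hypothetical names standing in for the driver's structs:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct rq_stat_items {
        uint64_t packets;
        uint64_t bytes;
};

struct stat_desc {
        const char *name;
        size_t offset;
};

#define RQ_STAT(m) { #m, offsetof(struct rq_stat_items, m) }

static const struct stat_desc rq_stats_desc[] = {
        RQ_STAT(packets),
        RQ_STAT(bytes),
};

#define RQ_STATS_LEN (sizeof(rq_stats_desc) / sizeof(rq_stats_desc[0]))

int main(void)
{
        struct rq_stat_items queue = { 100, 64000 }; /* per-queue totals */
        struct rq_stat_items tmp = { 10, 6400 };     /* one NAPI run */

        /* Merge the temporary stats into the queue, one table entry at a
         * time; adding a counter later only touches the struct and table.
         */
        for (size_t i = 0; i < RQ_STATS_LEN; i++) {
                size_t off = rq_stats_desc[i].offset;
                uint64_t *dst = (uint64_t *)((uint8_t *)&queue + off);

                *dst += *(uint64_t *)((uint8_t *)&tmp + off);
                printf("%s = %llu\n", rq_stats_desc[i].name,
                       (unsigned long long)*dst);
        }
        return 0;
}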
From 2c4a2f7d826329966e8ab86f842a95d3234c64da Mon Sep 17 00:00:00 2001
From: Toshiaki Makita
Date: Mon, 23 Jul 2018 23:36:06 +0900
Subject: [PATCH 3/6] virtio_net: Make drop counter per-queue

Since XDP was introduced, the drop counter can be updated much more
frequently than before, as XDP_DROP increments it. Thus a per-queue
drop counter would be useful for performance analysis.

This also avoids cache contention and a race when updating the counter;
the update is currently racy because NAPI handlers read-modify-write it
without any locks. There are more counters in dev->stats that are racy,
but I left them per-device because they are rarely updated and are not
worth being per-queue counters IMHO. Fixing them would require atomic
ops or some kind of locks.

Signed-off-by: Toshiaki Makita
Signed-off-by: David S. Miller
---
 drivers/net/virtio_net.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d03bfc4fce8e..7a47ce750a43 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -87,6 +87,7 @@ struct virtnet_sq_stats {
 struct virtnet_rq_stat_items {
         u64 packets;
         u64 bytes;
+        u64 drops;
 };
 
 struct virtnet_rq_stats {
@@ -109,6 +110,7 @@ static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
 static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
         { "packets",  VIRTNET_RQ_STAT(packets) },
         { "bytes",    VIRTNET_RQ_STAT(bytes) },
+        { "drops",    VIRTNET_RQ_STAT(drops) },
 };
 
@@ -705,7 +707,7 @@ err:
 
 err_xdp:
         rcu_read_unlock();
-        dev->stats.rx_dropped++;
+        stats->rx.drops++;
         put_page(page);
 xdp_xmit:
         return NULL;
@@ -728,7 +730,7 @@ static struct sk_buff *receive_big(struct net_device *dev,
         return skb;
 
 err:
-        dev->stats.rx_dropped++;
+        stats->rx.drops++;
         give_pages(rq, page);
         return NULL;
 }
@@ -952,7 +954,7 @@ err_skb:
                 put_page(page);
         }
 err_buf:
-        dev->stats.rx_dropped++;
+        stats->rx.drops++;
         dev_kfree_skb(head_skb);
 xdp_xmit:
         return NULL;
@@ -1632,7 +1634,7 @@ static void virtnet_stats(struct net_device *dev,
         int i;
 
         for (i = 0; i < vi->max_queue_pairs; i++) {
-                u64 tpackets, tbytes, rpackets, rbytes;
+                u64 tpackets, tbytes, rpackets, rbytes, rdrops;
                 struct receive_queue *rq = &vi->rq[i];
                 struct send_queue *sq = &vi->sq[i];
@@ -1646,17 +1648,18 @@ static void virtnet_stats(struct net_device *dev,
                         start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
                         rpackets = rq->stats.items.packets;
                         rbytes = rq->stats.items.bytes;
+                        rdrops = rq->stats.items.drops;
                 } while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
 
                 tot->rx_packets += rpackets;
                 tot->tx_packets += tpackets;
                 tot->rx_bytes += rbytes;
                 tot->tx_bytes += tbytes;
+                tot->rx_dropped += rdrops;
         }
 
         tot->tx_dropped = dev->stats.tx_dropped;
         tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
-        tot->rx_dropped = dev->stats.rx_dropped;
         tot->rx_length_errors = dev->stats.rx_length_errors;
         tot->rx_frame_errors = dev->stats.rx_frame_errors;
 }
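A sketch of the single-writer/per-queue pattern this patch moves to, with
toy names: each NAPI context is the only writer of its own queue's counter,
and a reader just sums the queues, as virtnet_stats() does above (the
kernel version additionally brackets updates with
u64_stats_update_begin/end so 32-bit readers see consistent 64-bit values).

#include <stdint.h>
#include <stdio.h>

#define NUM_QUEUES 4

struct toy_rq_stats {
        uint64_t drops; /* written only by this queue's NAPI handler */
};

static struct toy_rq_stats rq[NUM_QUEUES];

/* Single-writer update: no read-modify-write race between queues, unlike
 * the shared dev->stats.rx_dropped this replaces.
 */
static void napi_drop(int q)
{
        rq[q].drops++;
}

/* Reader-side aggregation, the analogue of virtnet_stats() above. */
static uint64_t total_drops(void)
{
        uint64_t sum = 0;

        for (int q = 0; q < NUM_QUEUES; q++)
                sum += rq[q].drops;
        return sum;
}

int main(void)
{
        napi_drop(0);
        napi_drop(0);
        napi_drop(3);
        printf("rx_dropped = %llu\n", (unsigned long long)total_drops());
        return 0;
}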
From 2a43565c06465323e33b117bcf2ce604c0594659 Mon Sep 17 00:00:00 2001
From: Toshiaki Makita
Date: Mon, 23 Jul 2018 23:36:07 +0900
Subject: [PATCH 4/6] virtio_net: Factor out the logic to determine xdp sq

Make sure to use the same logic in all places that determine the XDP
send queue. This is also useful for the XDP counters which the
following commit will introduce.

Signed-off-by: Toshiaki Makita
Signed-off-by: David S. Miller
---
 drivers/net/virtio_net.c | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7a47ce750a43..eca9b13b859e 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -457,16 +457,22 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
         return 0;
 }
 
+static struct send_queue *virtnet_xdp_sq(struct virtnet_info *vi)
+{
+        unsigned int qp;
+
+        qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
+        return &vi->sq[qp];
+}
+
 static int __virtnet_xdp_tx_xmit(struct virtnet_info *vi,
                                  struct xdp_frame *xdpf)
 {
         struct xdp_frame *xdpf_sent;
         struct send_queue *sq;
         unsigned int len;
-        unsigned int qp;
 
-        qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
-        sq = &vi->sq[qp];
+        sq = virtnet_xdp_sq(vi);
 
         /* Free up any pending old buffers before queueing new ones. */
         while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
@@ -484,7 +490,6 @@ static int virtnet_xdp_xmit(struct net_device *dev,
         struct bpf_prog *xdp_prog;
         struct send_queue *sq;
         unsigned int len;
-        unsigned int qp;
         int drops = 0;
         int err;
         int i;
@@ -492,8 +497,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
         if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
                 return -EINVAL;
 
-        qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
-        sq = &vi->sq[qp];
+        sq = virtnet_xdp_sq(vi);
 
         /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
          * indicate XDP resources have been successfully allocated.
@@ -1349,7 +1353,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
                 container_of(napi, struct receive_queue, napi);
         struct virtnet_info *vi = rq->vq->vdev->priv;
         struct send_queue *sq;
-        unsigned int received, qp;
+        unsigned int received;
         unsigned int xdp_xmit = 0;
 
         virtnet_poll_cleantx(rq);
@@ -1364,9 +1368,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
                 xdp_do_flush_map();
 
         if (xdp_xmit & VIRTIO_XDP_TX) {
-                qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
-                     smp_processor_id();
-                sq = &vi->sq[qp];
+                sq = virtnet_xdp_sq(vi);
                 virtqueue_kick(sq->vq);
         }
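The helper encodes the convention that the XDP TX queues are laid out after
the queues used by the normal stack, one per CPU, so the index is
curr_queue_pairs - xdp_queue_pairs + the current CPU. A standalone sketch
of that index computation, with made-up queue counts:

#include <stdio.h>

static unsigned int xdp_sq_index(unsigned int curr_queue_pairs,
                                 unsigned int xdp_queue_pairs,
                                 unsigned int cpu)
{
        /* XDP queues occupy the tail of the send-queue array. */
        return curr_queue_pairs - xdp_queue_pairs + cpu;
}

int main(void)
{
        /* e.g. 8 queue pairs total, 4 reserved for XDP on a 4-CPU guest */
        for (unsigned int cpu = 0; cpu < 4; cpu++)
                printf("cpu %u -> sq[%u]\n", cpu, xdp_sq_index(8, 4, cpu));
        return 0;
}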
From 5b8f3c8d30a6176c6be35c6ac75e22b0a60a3c43 Mon Sep 17 00:00:00 2001
From: Toshiaki Makita
Date: Mon, 23 Jul 2018 23:36:08 +0900
Subject: [PATCH 5/6] virtio_net: Add XDP related stats

Add the counters below:
* Tx
 - xdp_tx: frames sent by ndo_xdp_xmit or XDP_TX.
 - xdp_tx_drops: frames dropped out of the xdp_tx ones.
* Rx
 - xdp_packets: frames that went through the XDP program.
 - xdp_tx: XDP_TX frames.
 - xdp_redirects: XDP_REDIRECT frames.
 - xdp_drops: any frames dropped out of the xdp_packets ones.

Signed-off-by: Toshiaki Makita
Signed-off-by: David S. Miller
---
 drivers/net/virtio_net.c | 71 +++++++++++++++++++++++++++++++++-------
 1 file changed, 59 insertions(+), 12 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index eca9b13b859e..cb4ef331567c 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -82,12 +82,18 @@ struct virtnet_sq_stats {
         struct u64_stats_sync syncp;
         u64 packets;
         u64 bytes;
+        u64 xdp_tx;
+        u64 xdp_tx_drops;
 };
 
 struct virtnet_rq_stat_items {
         u64 packets;
         u64 bytes;
         u64 drops;
+        u64 xdp_packets;
+        u64 xdp_tx;
+        u64 xdp_redirects;
+        u64 xdp_drops;
 };
 
 struct virtnet_rq_stats {
@@ -97,20 +103,30 @@ struct virtnet_rq_stats {
 
 struct virtnet_rx_stats {
         struct virtnet_rq_stat_items rx;
+        struct {
+                unsigned int xdp_tx;
+                unsigned int xdp_tx_drops;
+        } tx;
 };
 
 #define VIRTNET_SQ_STAT(m)  offsetof(struct virtnet_sq_stats, m)
 #define VIRTNET_RQ_STAT(m)  offsetof(struct virtnet_rq_stat_items, m)
 
 static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
-        { "packets",  VIRTNET_SQ_STAT(packets) },
-        { "bytes",    VIRTNET_SQ_STAT(bytes) },
+        { "packets",       VIRTNET_SQ_STAT(packets) },
+        { "bytes",         VIRTNET_SQ_STAT(bytes) },
+        { "xdp_tx",        VIRTNET_SQ_STAT(xdp_tx) },
+        { "xdp_tx_drops",  VIRTNET_SQ_STAT(xdp_tx_drops) },
 };
 
 static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
-        { "packets",  VIRTNET_RQ_STAT(packets) },
-        { "bytes",    VIRTNET_RQ_STAT(bytes) },
-        { "drops",    VIRTNET_RQ_STAT(drops) },
+        { "packets",       VIRTNET_RQ_STAT(packets) },
+        { "bytes",         VIRTNET_RQ_STAT(bytes) },
+        { "drops",         VIRTNET_RQ_STAT(drops) },
+        { "xdp_packets",   VIRTNET_RQ_STAT(xdp_packets) },
+        { "xdp_tx",        VIRTNET_RQ_STAT(xdp_tx) },
+        { "xdp_redirects", VIRTNET_RQ_STAT(xdp_redirects) },
+        { "xdp_drops",     VIRTNET_RQ_STAT(xdp_drops) },
 };
 
 #define VIRTNET_SQ_STATS_LEN  ARRAY_SIZE(virtnet_sq_stats_desc)
@@ -491,20 +507,26 @@ static int virtnet_xdp_xmit(struct net_device *dev,
         struct send_queue *sq;
         unsigned int len;
         int drops = 0;
-        int err;
+        int ret, err;
         int i;
 
-        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
-                return -EINVAL;
-
         sq = virtnet_xdp_sq(vi);
 
+        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
+                ret = -EINVAL;
+                drops = n;
+                goto out;
+        }
+
         /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
          * indicate XDP resources have been successfully allocated.
          */
         xdp_prog = rcu_dereference(rq->xdp_prog);
-        if (!xdp_prog)
-                return -ENXIO;
+        if (!xdp_prog) {
+                ret = -ENXIO;
+                drops = n;
+                goto out;
+        }
 
         /* Free up any pending old buffers before queueing new ones. */
         while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
@@ -519,11 +541,17 @@ static int virtnet_xdp_xmit(struct net_device *dev,
                         drops++;
                 }
         }
+        ret = n - drops;
 
         if (flags & XDP_XMIT_FLUSH)
                 virtqueue_kick(sq->vq);
+out:
+        u64_stats_update_begin(&sq->stats.syncp);
+        sq->stats.xdp_tx += n;
+        sq->stats.xdp_tx_drops += drops;
+        u64_stats_update_end(&sq->stats.syncp);
 
-        return n - drops;
+        return ret;
 }
 
 static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
@@ -658,6 +686,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
                 xdp.rxq = &rq->xdp_rxq;
                 orig_data = xdp.data;
                 act = bpf_prog_run_xdp(xdp_prog, &xdp);
+                stats->rx.xdp_packets++;
 
                 switch (act) {
                 case XDP_PASS:
@@ -666,11 +695,14 @@ static struct sk_buff *receive_small(struct net_device *dev,
                         len = xdp.data_end - xdp.data;
                         break;
                 case XDP_TX:
+                        stats->rx.xdp_tx++;
                         xdpf = convert_to_xdp_frame(&xdp);
                         if (unlikely(!xdpf))
                                 goto err_xdp;
+                        stats->tx.xdp_tx++;
                         err = __virtnet_xdp_tx_xmit(vi, xdpf);
                         if (unlikely(err)) {
+                                stats->tx.xdp_tx_drops++;
                                 trace_xdp_exception(vi->dev, xdp_prog, act);
                                 goto err_xdp;
                         }
@@ -678,6 +710,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
                         rcu_read_unlock();
                         goto xdp_xmit;
                 case XDP_REDIRECT:
+                        stats->rx.xdp_redirects++;
                         err = xdp_do_redirect(dev, &xdp, xdp_prog);
                         if (err)
                                 goto err_xdp;
@@ -711,6 +744,7 @@ err:
 
 err_xdp:
         rcu_read_unlock();
+        stats->rx.xdp_drops++;
         stats->rx.drops++;
         put_page(page);
 xdp_xmit:
@@ -808,6 +842,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                 xdp.rxq = &rq->xdp_rxq;
 
                 act = bpf_prog_run_xdp(xdp_prog, &xdp);
+                stats->rx.xdp_packets++;
 
                 switch (act) {
                 case XDP_PASS:
@@ -832,11 +867,14 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                         }
                         break;
                 case XDP_TX:
+                        stats->rx.xdp_tx++;
                         xdpf = convert_to_xdp_frame(&xdp);
                         if (unlikely(!xdpf))
                                 goto err_xdp;
+                        stats->tx.xdp_tx++;
                         err = __virtnet_xdp_tx_xmit(vi, xdpf);
                         if (unlikely(err)) {
+                                stats->tx.xdp_tx_drops++;
                                 trace_xdp_exception(vi->dev, xdp_prog, act);
                                 if (unlikely(xdp_page != page))
                                         put_page(xdp_page);
@@ -848,6 +886,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                         rcu_read_unlock();
                         goto xdp_xmit;
                 case XDP_REDIRECT:
+                        stats->rx.xdp_redirects++;
                         err = xdp_do_redirect(dev, &xdp, xdp_prog);
                         if (err) {
                                 if (unlikely(xdp_page != page))
@@ -943,6 +982,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 
 err_xdp:
         rcu_read_unlock();
+        stats->rx.xdp_drops++;
 err_skb:
         put_page(page);
         while (num_buf-- > 1) {
@@ -1262,6 +1302,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
 {
         struct virtnet_info *vi = rq->vq->vdev->priv;
         struct virtnet_rx_stats stats = {};
+        struct send_queue *sq;
         unsigned int len;
         void *buf;
         int i;
@@ -1297,6 +1338,12 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
         }
         u64_stats_update_end(&rq->stats.syncp);
 
+        sq = virtnet_xdp_sq(vi);
+        u64_stats_update_begin(&sq->stats.syncp);
+        sq->stats.xdp_tx += stats.tx.xdp_tx;
+        sq->stats.xdp_tx_drops += stats.tx.xdp_tx_drops;
+        u64_stats_update_end(&sq->stats.syncp);
+
         return stats.rx.packets;
 }
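For reference, a minimal XDP program that would exercise the new rx
counters when attached to a virtio-net device (restricted C built with
clang -target bpf; the SEC macro is defined locally so the snippet does not
depend on any particular helper header). Every frame bumps xdp_packets,
and the XDP_DROP return makes each one count as an xdp_drop as well; this
is an illustrative companion, not part of the patch.

#include <linux/bpf.h>

#define SEC(name) __attribute__((section(name), used))

SEC("xdp")
int xdp_drop_all(struct xdp_md *ctx)
{
        /* counted as xdp_packets and xdp_drops by the driver */
        return XDP_DROP;
}

char _license[] SEC("license") = "GPL";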
From 461f03dc99cf6afcc3c70aaac56c4a7eee5a62bd Mon Sep 17 00:00:00 2001
From: Toshiaki Makita
Date: Mon, 23 Jul 2018 23:36:09 +0900
Subject: [PATCH 6/6] virtio_net: Add kick stats

Add kick counters so that we can infer the number of VM exits.

Signed-off-by: Toshiaki Makita
Signed-off-by: David S. Miller
---
 drivers/net/virtio_net.c | 34 ++++++++++++++++++++++++++++------
 1 file changed, 28 insertions(+), 6 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index cb4ef331567c..1880c86e84b4 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -84,6 +84,7 @@ struct virtnet_sq_stats {
         u64 bytes;
         u64 xdp_tx;
         u64 xdp_tx_drops;
+        u64 kicks;
 };
 
 struct virtnet_rq_stat_items {
@@ -94,6 +95,7 @@ struct virtnet_rq_stat_items {
         u64 xdp_tx;
         u64 xdp_redirects;
         u64 xdp_drops;
+        u64 kicks;
 };
 
 struct virtnet_rq_stats {
@@ -117,6 +119,7 @@ static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
         { "bytes",         VIRTNET_SQ_STAT(bytes) },
         { "xdp_tx",        VIRTNET_SQ_STAT(xdp_tx) },
         { "xdp_tx_drops",  VIRTNET_SQ_STAT(xdp_tx_drops) },
+        { "kicks",         VIRTNET_SQ_STAT(kicks) },
 };
 
 static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
@@ -127,6 +130,7 @@ static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
         { "xdp_tx",        VIRTNET_RQ_STAT(xdp_tx) },
         { "xdp_redirects", VIRTNET_RQ_STAT(xdp_redirects) },
         { "xdp_drops",     VIRTNET_RQ_STAT(xdp_drops) },
+        { "kicks",         VIRTNET_RQ_STAT(kicks) },
 };
 
 #define VIRTNET_SQ_STATS_LEN  ARRAY_SIZE(virtnet_sq_stats_desc)
@@ -507,6 +511,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
         struct send_queue *sq;
         unsigned int len;
         int drops = 0;
+        int kicks = 0;
         int ret, err;
         int i;
@@ -543,12 +548,15 @@ static int virtnet_xdp_xmit(struct net_device *dev,
         }
         ret = n - drops;
 
-        if (flags & XDP_XMIT_FLUSH)
-                virtqueue_kick(sq->vq);
+        if (flags & XDP_XMIT_FLUSH) {
+                if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
+                        kicks = 1;
+        }
 out:
         u64_stats_update_begin(&sq->stats.syncp);
         sq->stats.xdp_tx += n;
         sq->stats.xdp_tx_drops += drops;
+        sq->stats.kicks += kicks;
         u64_stats_update_end(&sq->stats.syncp);
 
         return ret;
@@ -1226,7 +1234,12 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
                 if (err)
                         break;
         } while (rq->vq->num_free);
-        virtqueue_kick(rq->vq);
+        if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
+                u64_stats_update_begin(&rq->stats.syncp);
+                rq->stats.items.kicks++;
+                u64_stats_update_end(&rq->stats.syncp);
+        }
+
         return !oom;
 }
 
@@ -1416,7 +1429,11 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 
         if (xdp_xmit & VIRTIO_XDP_TX) {
                 sq = virtnet_xdp_sq(vi);
-                virtqueue_kick(sq->vq);
+                if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
+                        u64_stats_update_begin(&sq->stats.syncp);
+                        sq->stats.kicks++;
+                        u64_stats_update_end(&sq->stats.syncp);
+                }
         }
 
         return received;
@@ -1578,8 +1595,13 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
                 }
         }
 
-        if (kick || netif_xmit_stopped(txq))
-                virtqueue_kick(sq->vq);
+        if (kick || netif_xmit_stopped(txq)) {
+                if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
+                        u64_stats_update_begin(&sq->stats.syncp);
+                        sq->stats.kicks++;
+                        u64_stats_update_end(&sq->stats.syncp);
+                }
+        }
 
         return NETDEV_TX_OK;
 }
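The patch counts a kick only when a notification was actually sent, by
open-coding virtqueue_kick() as virtqueue_kick_prepare() (does the host
need a notification?) followed by virtqueue_notify() (the write that traps
into the hypervisor). A toy model of that split, with stand-in types; the
event-suppression flag is a simplification of the real virtio mechanism:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_vq {
        bool event_suppressed; /* host asked not to be notified */
};

static bool toy_kick_prepare(struct toy_vq *vq)
{
        return !vq->event_suppressed;
}

static bool toy_notify(struct toy_vq *vq)
{
        /* in the driver this is the MMIO/PIO write causing a VM exit */
        return true;
}

int main(void)
{
        struct toy_vq vq = { .event_suppressed = false };
        uint64_t kicks = 0;

        for (int i = 0; i < 3; i++) {
                /* only count a kick when a notification really went out */
                if (toy_kick_prepare(&vq) && toy_notify(&vq))
                        kicks++;
                vq.event_suppressed = true; /* host caught up */
        }
        printf("kicks = %llu\n", (unsigned long long)kicks);
        return 0;
}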