virtio_net: make all RX paths handle errors consistently
receive_mergeable now handles errors internally. Do the same for the big and small packet paths, otherwise the logic is too hard to follow. Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Acked-by: Jason Wang <jasowang@redhat.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
8fc3b9e9a2
commit
f121159d72
@ -299,6 +299,35 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
|
|||||||
return skb;
|
return skb;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static struct sk_buff *receive_small(void *buf, unsigned int len)
|
||||||
|
{
|
||||||
|
struct sk_buff * skb = buf;
|
||||||
|
|
||||||
|
len -= sizeof(struct virtio_net_hdr);
|
||||||
|
skb_trim(skb, len);
|
||||||
|
|
||||||
|
return skb;
|
||||||
|
}
|
||||||
|
|
||||||
|
static struct sk_buff *receive_big(struct net_device *dev,
|
||||||
|
struct receive_queue *rq,
|
||||||
|
void *buf,
|
||||||
|
unsigned int len)
|
||||||
|
{
|
||||||
|
struct page *page = buf;
|
||||||
|
struct sk_buff *skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
|
||||||
|
|
||||||
|
if (unlikely(!skb))
|
||||||
|
goto err;
|
||||||
|
|
||||||
|
return skb;
|
||||||
|
|
||||||
|
err:
|
||||||
|
dev->stats.rx_dropped++;
|
||||||
|
give_pages(rq, page);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
static struct sk_buff *receive_mergeable(struct net_device *dev,
|
static struct sk_buff *receive_mergeable(struct net_device *dev,
|
||||||
struct receive_queue *rq,
|
struct receive_queue *rq,
|
||||||
void *buf,
|
void *buf,
|
||||||
@ -392,7 +421,6 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
|
|||||||
struct net_device *dev = vi->dev;
|
struct net_device *dev = vi->dev;
|
||||||
struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
|
struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
|
||||||
struct sk_buff *skb;
|
struct sk_buff *skb;
|
||||||
struct page *page;
|
|
||||||
struct skb_vnet_hdr *hdr;
|
struct skb_vnet_hdr *hdr;
|
||||||
|
|
||||||
if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
|
if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
|
||||||
@ -407,23 +435,15 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!vi->mergeable_rx_bufs && !vi->big_packets) {
|
if (vi->mergeable_rx_bufs)
|
||||||
skb = buf;
|
|
||||||
len -= sizeof(struct virtio_net_hdr);
|
|
||||||
skb_trim(skb, len);
|
|
||||||
} else if (vi->mergeable_rx_bufs) {
|
|
||||||
skb = receive_mergeable(dev, rq, buf, len);
|
skb = receive_mergeable(dev, rq, buf, len);
|
||||||
if (unlikely(!skb))
|
else if (vi->big_packets)
|
||||||
return;
|
skb = receive_big(dev, rq, buf, len);
|
||||||
} else {
|
else
|
||||||
page = buf;
|
skb = receive_small(buf, len);
|
||||||
skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
|
|
||||||
if (unlikely(!skb)) {
|
if (unlikely(!skb))
|
||||||
dev->stats.rx_dropped++;
|
return;
|
||||||
give_pages(rq, page);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
hdr = skb_vnet_hdr(skb);
|
hdr = skb_vnet_hdr(skb);
|
||||||
|
|
||||||
|
Loading…
x
Reference in New Issue
Block a user