virtio-net: support multi-buffer xdp
The driver can now pass the skb to the stack via build_skb_from_xdp_buff(). It forwards multi-buffer packets through the send queue on XDP_TX and XDP_REDIRECT, and releases the references to the fragment pages on XDP_DROP.

Signed-off-by: Heng Qi <hengqi@linux.alibaba.com>
Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit fab89bafa9
parent 18117a842a
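For context (not part of this patch): a BPF program only receives these multi-buffer packets if it opts in to "frags" support, i.e. it is loaded with BPF_F_XDP_HAS_FRAGS. Below is a minimal sketch of such a consumer, assuming libbpf's "xdp.frags" section naming convention; the program name and the 4096-byte threshold are illustrative only.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative frags-aware XDP program; not part of this patch. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* The "xdp.frags" section tells libbpf to load this program with
 * BPF_F_XDP_HAS_FRAGS, declaring that it can safely run on packets
 * whose data spans multiple buffers.
 */
SEC("xdp.frags")
int xdp_mb_example(struct xdp_md *ctx)
{
	/* Total packet length: linear part plus all fragments. */
	__u64 len = bpf_xdp_get_buff_len(ctx);

	/* Example policy only: drop oversized frames, pass the rest. */
	if (len > 4096)
		return XDP_DROP;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";

Loading and attaching such a program is unchanged; only the frags flag distinguishes it from a single-buffer XDP program.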
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1090,7 +1090,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	struct bpf_prog *xdp_prog;
 	unsigned int truesize = mergeable_ctx_to_truesize(ctx);
 	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
-	unsigned int metasize = 0;
 	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
 	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
 	unsigned int frame_sz, xdp_room;
@@ -1186,63 +1185,24 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 
 		switch (act) {
 		case XDP_PASS:
-			metasize = xdp.data - xdp.data_meta;
-
-			/* recalculate offset to account for any header
-			 * adjustments and minus the metasize to copy the
-			 * metadata in page_to_skb(). Note other cases do not
-			 * build an skb and avoid using offset
-			 */
-			offset = xdp.data - page_address(xdp_page) -
-				 vi->hdr_len - metasize;
-
-			/* recalculate len if xdp.data, xdp.data_end or
-			 * xdp.data_meta were adjusted
-			 */
-			len = xdp.data_end - xdp.data + vi->hdr_len + metasize;
-
-			/* recalculate headroom if xdp.data or xdp_data_meta
-			 * were adjusted, note that offset should always point
-			 * to the start of the reserved bytes for virtio_net
-			 * header which are followed by xdp.data, that means
-			 * that offset is equal to the headroom (when buf is
-			 * starting at the beginning of the page, otherwise
-			 * there is a base offset inside the page) but it's used
-			 * with a different starting point (buf start) than
-			 * xdp.data (buf start + vnet hdr size). If xdp.data or
-			 * data_meta were adjusted by the xdp prog then the
-			 * headroom size has changed and so has the offset, we
-			 * can use data_hard_start, which points at buf start +
-			 * vnet hdr size, to calculate the new headroom and use
-			 * it later to compute buf start in page_to_skb()
-			 */
-			headroom = xdp.data - xdp.data_hard_start - metasize;
-
-			/* We can only create skb based on xdp_page. */
-			if (unlikely(xdp_page != page)) {
-				rcu_read_unlock();
+			if (unlikely(xdp_page != page))
 				put_page(page);
-				head_skb = page_to_skb(vi, rq, xdp_page, offset,
-						       len, PAGE_SIZE);
-				return head_skb;
-			}
-			break;
+			head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
+			rcu_read_unlock();
+			return head_skb;
 		case XDP_TX:
 			stats->xdp_tx++;
 			xdpf = xdp_convert_buff_to_frame(&xdp);
 			if (unlikely(!xdpf)) {
-				if (unlikely(xdp_page != page))
-					put_page(xdp_page);
-				goto err_xdp;
+				netdev_dbg(dev, "convert buff to frame failed for xdp\n");
+				goto err_xdp_frags;
 			}
 			err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
 			if (unlikely(!err)) {
 				xdp_return_frame_rx_napi(xdpf);
 			} else if (unlikely(err < 0)) {
 				trace_xdp_exception(vi->dev, xdp_prog, act);
-				if (unlikely(xdp_page != page))
-					put_page(xdp_page);
-				goto err_xdp;
+				goto err_xdp_frags;
 			}
 			*xdp_xmit |= VIRTIO_XDP_TX;
 			if (unlikely(xdp_page != page))
@@ -1252,11 +1212,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 		case XDP_REDIRECT:
 			stats->xdp_redirects++;
 			err = xdp_do_redirect(dev, &xdp, xdp_prog);
-			if (err) {
-				if (unlikely(xdp_page != page))
-					put_page(xdp_page);
-				goto err_xdp;
-			}
+			if (err)
+				goto err_xdp_frags;
 			*xdp_xmit |= VIRTIO_XDP_REDIR;
 			if (unlikely(xdp_page != page))
 				put_page(page);
@@ -1269,9 +1226,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 			trace_xdp_exception(vi->dev, xdp_prog, act);
 			fallthrough;
 		case XDP_DROP:
-			if (unlikely(xdp_page != page))
-				__free_pages(xdp_page, 0);
-			goto err_xdp;
+			goto err_xdp_frags;
 		}
 err_xdp_frags:
 		if (unlikely(xdp_page != page))