virtio_net: xsk: bind/unbind xsk for rx
This patch implements the logic of binding/unbinding an xsk pool to/from an rq.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Link: https://patch.msgid.link/20240708112537.96291-7-xuanzhuo@linux.alibaba.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 5db481059d
commit 09d2b3182c
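For context (this example is not part of the patch): the bind/unbind path below is driven from userspace, which registers a UMEM and binds an AF_XDP socket to one queue of the netdev; when zero-copy is possible, the stack then offers the resulting xsk pool to the driver through its .ndo_bpf hook with XDP_SETUP_XSK_POOL. A minimal sketch of that userspace side, assuming the libxdp xsk helpers and an illustrative interface name "ens1" with queue 0:

/* Userspace sketch (not from the patch): bind an AF_XDP socket to queue 0
 * of a virtio-net interface.  Interface name and sizes are illustrative
 * assumptions; error handling is minimal.
 */
#include <stdlib.h>
#include <unistd.h>
#include <xdp/xsk.h>            /* libxdp helpers; older setups use <bpf/xsk.h> */

#define NUM_FRAMES 4096

int main(void)
{
        size_t len = NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE;
        struct xsk_ring_prod fill, tx;
        struct xsk_ring_cons comp, rx;
        struct xsk_umem *umem;
        struct xsk_socket *xsk;
        void *bufs;

        if (posix_memalign(&bufs, getpagesize(), len))
                return 1;

        /* Register the packet buffer area as a UMEM. */
        if (xsk_umem__create(&umem, bufs, len, &fill, &comp, NULL))
                return 1;

        /* Binding to (ifname, queue_id) lets the stack offer the pool built
         * from this UMEM to the driver via .ndo_bpf (XDP_SETUP_XSK_POOL),
         * i.e. the enable path added by this patch.
         */
        if (xsk_socket__create(&xsk, "ens1", 0, umem, &rx, &tx, NULL))
                return 1;

        /* ... populate the fill ring and consume descriptors from rx ... */

        /* Teardown drives the same hook with a NULL pool (unbind). */
        xsk_socket__delete(xsk);
        xsk_umem__delete(umem);
        free(bufs);
        return 0;
}

Closing the socket and deleting the UMEM at the end exercises the disable path that the hunks below add to the driver.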
@@ -25,6 +25,7 @@
 #include <net/net_failover.h>
 #include <net/netdev_rx_queue.h>
 #include <net/netdev_queues.h>
+#include <net/xdp_sock_drv.h>
 
 static int napi_weight = NAPI_POLL_WEIGHT;
 module_param(napi_weight, int, 0444);
@@ -348,6 +349,11 @@ struct receive_queue {
 
         /* Record the last dma info to free after new pages is allocated. */
         struct virtnet_rq_dma *last_dma;
+
+        struct xsk_buff_pool *xsk_pool;
+
+        /* xdp rxq used by xsk */
+        struct xdp_rxq_info xsk_rxq_info;
 };
 
 /* This structure can contain rss message with maximum settings for indirection table and keysize
@@ -5026,6 +5032,132 @@ static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
 	return virtnet_set_guest_offloads(vi, offloads);
 }
 
+static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queue *rq,
+				    struct xsk_buff_pool *pool)
+{
+	int err, qindex;
+
+	qindex = rq - vi->rq;
+
+	if (pool) {
+		err = xdp_rxq_info_reg(&rq->xsk_rxq_info, vi->dev, qindex, rq->napi.napi_id);
+		if (err < 0)
+			return err;
+
+		err = xdp_rxq_info_reg_mem_model(&rq->xsk_rxq_info,
+						 MEM_TYPE_XSK_BUFF_POOL, NULL);
+		if (err < 0)
+			goto unreg;
+
+		xsk_pool_set_rxq_info(pool, &rq->xsk_rxq_info);
+	}
+
+	virtnet_rx_pause(vi, rq);
+
+	err = virtqueue_reset(rq->vq, virtnet_rq_unmap_free_buf);
+	if (err) {
+		netdev_err(vi->dev, "reset rx fail: rx queue index: %d err: %d\n", qindex, err);
+
+		pool = NULL;
+	}
+
+	rq->xsk_pool = pool;
+
+	virtnet_rx_resume(vi, rq);
+
+	if (pool)
+		return 0;
+
+unreg:
+	xdp_rxq_info_unreg(&rq->xsk_rxq_info);
+	return err;
+}
+
+static int virtnet_xsk_pool_enable(struct net_device *dev,
+				   struct xsk_buff_pool *pool,
+				   u16 qid)
+{
+	struct virtnet_info *vi = netdev_priv(dev);
+	struct receive_queue *rq;
+	struct device *dma_dev;
+	struct send_queue *sq;
+	int err;
+
+	if (vi->hdr_len > xsk_pool_get_headroom(pool))
+		return -EINVAL;
+
+	/* In big_packets mode, xdp cannot work, so there is no need to
+	 * initialize xsk of rq.
+	 */
+	if (vi->big_packets && !vi->mergeable_rx_bufs)
+		return -ENOENT;
+
+	if (qid >= vi->curr_queue_pairs)
+		return -EINVAL;
+
+	sq = &vi->sq[qid];
+	rq = &vi->rq[qid];
+
+	/* xsk assumes that tx and rx must have the same dma device. The af-xdp
+	 * may use one buffer to receive from the rx and reuse this buffer to
+	 * send by the tx. So the dma dev of sq and rq must be the same one.
+	 *
+	 * But vq->dma_dev allows every vq has the respective dma dev. So I
+	 * check the dma dev of vq and sq is the same dev.
+	 */
+	if (virtqueue_dma_dev(rq->vq) != virtqueue_dma_dev(sq->vq))
+		return -EINVAL;
+
+	dma_dev = virtqueue_dma_dev(rq->vq);
+	if (!dma_dev)
+		return -EINVAL;
+
+	err = xsk_pool_dma_map(pool, dma_dev, 0);
+	if (err)
+		goto err_xsk_map;
+
+	err = virtnet_rq_bind_xsk_pool(vi, rq, pool);
+	if (err)
+		goto err_rq;
+
+	return 0;
+
+err_rq:
+	xsk_pool_dma_unmap(pool, 0);
+err_xsk_map:
+	return err;
+}
+
+static int virtnet_xsk_pool_disable(struct net_device *dev, u16 qid)
+{
+	struct virtnet_info *vi = netdev_priv(dev);
+	struct xsk_buff_pool *pool;
+	struct receive_queue *rq;
+	int err;
+
+	if (qid >= vi->curr_queue_pairs)
+		return -EINVAL;
+
+	rq = &vi->rq[qid];
+
+	pool = rq->xsk_pool;
+
+	err = virtnet_rq_bind_xsk_pool(vi, rq, NULL);
+
+	xsk_pool_dma_unmap(pool, 0);
+
+	return err;
+}
+
+static int virtnet_xsk_pool_setup(struct net_device *dev, struct netdev_bpf *xdp)
+{
+	if (xdp->xsk.pool)
+		return virtnet_xsk_pool_enable(dev, xdp->xsk.pool,
+					       xdp->xsk.queue_id);
+	else
+		return virtnet_xsk_pool_disable(dev, xdp->xsk.queue_id);
+}
+
 static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 			   struct netlink_ext_ack *extack)
 {
@@ -5151,6 +5283,8 @@ static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 	switch (xdp->command) {
 	case XDP_SETUP_PROG:
 		return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
+	case XDP_SETUP_XSK_POOL:
+		return virtnet_xsk_pool_setup(dev, xdp);
 	default:
 		return -EINVAL;
 	}