xsk: Add shared umem support between queue ids

Add support for sharing a umem between queue ids on the same
device. This mode is invoked with the XDP_SHARED_UMEM bind flag.
Previously, sharing was only supported within the same device and
queue id, in which case the sockets shared a single set of fill and
completion rings. Note that when sharing a umem between queue ids,
each socket needs its own fill ring and completion ring: create them
and tie them to the socket before binding with the XDP_SHARED_UMEM
flag. This is so that the single-producer single-consumer semantics
of the rings can be upheld.
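
For illustration only (not part of this patch), a minimal user-space
sketch of that flow could look as follows. The helper name
bind_shared_umem_qid and its parameters are hypothetical; umem_fd is
assumed to be an AF_XDP socket that has already registered the umem
and bound to another queue id on the same netdev, and ring mmaps and
error handling are omitted:

  #include <linux/if_xdp.h>
  #include <net/if.h>
  #include <sys/socket.h>

  #ifndef AF_XDP
  #define AF_XDP 44
  #endif
  #ifndef SOL_XDP
  #define SOL_XDP 283
  #endif

  /* Bind a new socket to queue 'qid' of 'ifname' while sharing the
   * umem owned by 'umem_fd'. Because the queue id differs, this
   * socket must bring its own fill and completion rings, created
   * before bind().
   */
  static int bind_shared_umem_qid(int umem_fd, const char *ifname,
                                  __u32 qid)
  {
          struct sockaddr_xdp sxdp = { 0 };
          int ring_size = 2048;
          int fd = socket(AF_XDP, SOCK_RAW, 0);

          /* Per-queue-id fill and completion rings for this socket. */
          setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING,
                     &ring_size, sizeof(ring_size));
          setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING,
                     &ring_size, sizeof(ring_size));
          /* At least one rx (or tx) ring is still required to bind. */
          setsockopt(fd, SOL_XDP, XDP_RX_RING,
                     &ring_size, sizeof(ring_size));

          sxdp.sxdp_family = AF_XDP;
          sxdp.sxdp_ifindex = if_nametoindex(ifname);
          sxdp.sxdp_queue_id = qid;
          sxdp.sxdp_flags = XDP_SHARED_UMEM;
          sxdp.sxdp_shared_umem_fd = umem_fd;

          return bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
  }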

Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Björn Töpel <bjorn.topel@intel.com>
Link: https://lore.kernel.org/bpf/1598603189-32145-12-git-send-email-magnus.karlsson@intel.com
---
 3 files changed, 56 insertions(+), 16 deletions(-)

--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h

@@ -81,6 +81,8 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
                                                 struct xdp_umem *umem);
 int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
                   u16 queue_id, u16 flags);
+int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
+                         struct net_device *dev, u16 queue_id);
 void xp_destroy(struct xsk_buff_pool *pool);
 void xp_release(struct xdp_buff_xsk *xskb);
 void xp_get_pool(struct xsk_buff_pool *pool);

--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c

@@ -689,12 +689,6 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
                         goto out_unlock;
                 }
 
-                if (xs->fq_tmp || xs->cq_tmp) {
-                        /* Do not allow setting your own fq or cq. */
-                        err = -EINVAL;
-                        goto out_unlock;
-                }
-
                 sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
                 if (IS_ERR(sock)) {
                         err = PTR_ERR(sock);
@@ -707,15 +701,41 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
                         sockfd_put(sock);
                         goto out_unlock;
                 }
-                if (umem_xs->dev != dev || umem_xs->queue_id != qid) {
+
+                if (umem_xs->dev != dev) {
                         err = -EINVAL;
                         sockfd_put(sock);
                         goto out_unlock;
                 }
 
-                /* Share the buffer pool with the other socket. */
-                xp_get_pool(umem_xs->pool);
-                xs->pool = umem_xs->pool;
+                if (umem_xs->queue_id != qid) {
+                        /* Share the umem with another socket on another qid */
+                        xs->pool = xp_create_and_assign_umem(xs,
+                                                             umem_xs->umem);
+                        if (!xs->pool) {
+                                sockfd_put(sock);
+                                goto out_unlock;
+                        }
+
+                        err = xp_assign_dev_shared(xs->pool, umem_xs->umem,
+                                                   dev, qid);
+                        if (err) {
+                                xp_destroy(xs->pool);
+                                sockfd_put(sock);
+                                goto out_unlock;
+                        }
+                } else {
+                        /* Share the buffer pool with the other socket. */
+                        if (xs->fq_tmp || xs->cq_tmp) {
+                                /* Do not allow setting your own fq or cq. */
+                                err = -EINVAL;
+                                sockfd_put(sock);
+                                goto out_unlock;
+                        }
+
+                        xp_get_pool(umem_xs->pool);
+                        xs->pool = umem_xs->pool;
+                }
+
                 xdp_get_umem(umem_xs->umem);
                 WRITE_ONCE(xs->umem, umem_xs->umem);
                 sockfd_put(sock);
@@ -847,10 +867,6 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
                         mutex_unlock(&xs->mutex);
                         return -EBUSY;
                 }
-                if (!xs->umem) {
-                        mutex_unlock(&xs->mutex);
-                        return -EINVAL;
-                }
 
                 q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
                         &xs->cq_tmp;

--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c

@@ -123,8 +123,8 @@ static void xp_disable_drv_zc(struct xsk_buff_pool *pool)
         }
 }
 
-int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *netdev,
-                  u16 queue_id, u16 flags)
+static int __xp_assign_dev(struct xsk_buff_pool *pool,
+                           struct net_device *netdev, u16 queue_id, u16 flags)
 {
         bool force_zc, force_copy;
         struct netdev_bpf bpf;
@@ -193,6 +193,28 @@ err_unreg_pool:
         return err;
 }
 
+int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
+                  u16 queue_id, u16 flags)
+{
+        return __xp_assign_dev(pool, dev, queue_id, flags);
+}
+
+int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
+                         struct net_device *dev, u16 queue_id)
+{
+        u16 flags;
+
+        /* One fill and completion ring required for each queue id. */
+        if (!pool->fq || !pool->cq)
+                return -EINVAL;
+
+        flags = umem->zc ? XDP_ZEROCOPY : XDP_COPY;
+        if (pool->uses_need_wakeup)
+                flags |= XDP_USE_NEED_WAKEUP;
+
+        return __xp_assign_dev(pool, dev, queue_id, flags);
+}
+
 void xp_clear_dev(struct xsk_buff_pool *pool)
 {
         if (!pool->netdev)