Daniel Borkmann says:

====================
pull-request: bpf 2022-04-27

We've added 5 non-merge commits during the last 20 day(s) which contain
a total of 6 files changed, 34 insertions(+), 12 deletions(-).

The main changes are:

1) Fix xsk sockets when rx and tx are separately bound to the same umem,
   also fix xsk copy mode combined with busy poll, from Maciej Fijalkowski.

2) Fix BPF tunnel/collect_md helpers with bpf_xmit lwt hook usage which
   triggered a crash due to invalid metadata_dst access, from Eyal Birger.

3) Fix release of page pool in XDP live packet mode, from Toke Høiland-Jørgensen.

4) Fix potential NULL pointer dereference in kretprobes, from Adam Zabrocki.
   (Masami & Steven preferred this small fix to be routed via the bpf tree,
   given it's a follow-up fix to Masami's rethook work that went via bpf
   earlier, too.)

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  xsk: Fix possible crash when multiple sockets are created
  kprobes: Fix KRETPROBES when CONFIG_KRETPROBE_ON_RETHOOK is set
  bpf, lwt: Fix crash when using bpf_skb_set_tunnel_key() from bpf_xmit lwt hook
  bpf: Fix release of page_pool in BPF_PROG_RUN in test runner
  xsk: Fix l2fwd for copy mode + busy poll combo
====================

Link: https://lore.kernel.org/r/20220427212748.9576-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 347cb5deae
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -97,6 +97,7 @@ int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
 		  u16 queue_id, u16 flags);
 int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
 			 struct net_device *dev, u16 queue_id);
+int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
 void xp_destroy(struct xsk_buff_pool *pool);
 void xp_get_pool(struct xsk_buff_pool *pool);
 bool xp_put_pool(struct xsk_buff_pool *pool);
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -2126,7 +2126,7 @@ static void kretprobe_rethook_handler(struct rethook_node *rh, void *data,
 	struct kprobe_ctlblk *kcb;
 
 	/* The data must NOT be null. This means rethook data structure is broken. */
-	if (WARN_ON_ONCE(!data))
+	if (WARN_ON_ONCE(!data) || !rp->handler)
 		return;
 
 	__this_cpu_write(current_kprobe, &rp->kp);
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -108,6 +108,7 @@ struct xdp_test_data {
 	struct page_pool *pp;
 	struct xdp_frame **frames;
 	struct sk_buff **skbs;
+	struct xdp_mem_info mem;
 	u32 batch_size;
 	u32 frame_cnt;
 };
@@ -147,7 +148,6 @@ static void xdp_test_run_init_page(struct page *page, void *arg)
 
 static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
 {
-	struct xdp_mem_info mem = {};
 	struct page_pool *pp;
 	int err = -ENOMEM;
 	struct page_pool_params pp_params = {
@@ -174,7 +174,7 @@ static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
 	}
 
 	/* will copy 'mem.id' into pp->xdp_mem_id */
-	err = xdp_reg_mem_model(&mem, MEM_TYPE_PAGE_POOL, pp);
+	err = xdp_reg_mem_model(&xdp->mem, MEM_TYPE_PAGE_POOL, pp);
 	if (err)
 		goto err_mmodel;
 
@@ -202,6 +202,7 @@ err_skbs:
 
 static void xdp_test_run_teardown(struct xdp_test_data *xdp)
 {
+	xdp_unreg_mem_model(&xdp->mem);
 	page_pool_destroy(xdp->pp);
 	kfree(xdp->frames);
 	kfree(xdp->skbs);
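For reference, a sketch of the page_pool/mem-model lifecycle these hunks pair up. xdp_reg_mem_model(), xdp_unreg_mem_model(), page_pool_create() and page_pool_destroy() are the real kernel APIs; the ctx_setup()/ctx_teardown() helpers and pp_params values are illustrative. The point of the fix is that the registered xdp_mem_info must be kept around (here in the struct) so teardown can unregister it, dropping the mem model's pool reference before the pool is destroyed.

#include <linux/err.h>
#include <linux/numa.h>
#include <net/page_pool.h>
#include <net/xdp.h>

struct live_frame_ctx {
	struct page_pool *pp;
	struct xdp_mem_info mem;	/* must outlive the pool: unreg needs mem.id */
};

static int ctx_setup(struct live_frame_ctx *ctx)
{
	struct page_pool_params pp_params = {
		.order		= 0,
		.pool_size	= 256,
		.nid		= NUMA_NO_NODE,
		.dev		= NULL,		/* no DMA mapping in this sketch */
	};
	int err;

	ctx->pp = page_pool_create(&pp_params);
	if (IS_ERR(ctx->pp))
		return PTR_ERR(ctx->pp);

	/* Registers an id in ctx->mem and copies it into pp->xdp_mem_id. */
	err = xdp_reg_mem_model(&ctx->mem, MEM_TYPE_PAGE_POOL, ctx->pp);
	if (err) {
		page_pool_destroy(ctx->pp);
		return err;
	}
	return 0;
}

static void ctx_teardown(struct live_frame_ctx *ctx)
{
	/* Unregister first: this drops the mem model's reference so that
	 * page_pool_destroy() below can actually release the pool.
	 */
	xdp_unreg_mem_model(&ctx->mem);
	page_pool_destroy(ctx->pp);
}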
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -159,10 +159,8 @@ static int bpf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 	return dst->lwtstate->orig_output(net, sk, skb);
 }
 
-static int xmit_check_hhlen(struct sk_buff *skb)
+static int xmit_check_hhlen(struct sk_buff *skb, int hh_len)
 {
-	int hh_len = skb_dst(skb)->dev->hard_header_len;
-
 	if (skb_headroom(skb) < hh_len) {
 		int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
 
@@ -274,6 +272,7 @@ static int bpf_xmit(struct sk_buff *skb)
 
 	bpf = bpf_lwt_lwtunnel(dst->lwtstate);
 	if (bpf->xmit.prog) {
+		int hh_len = dst->dev->hard_header_len;
 		__be16 proto = skb->protocol;
 		int ret;
 
@@ -291,7 +290,7 @@ static int bpf_xmit(struct sk_buff *skb)
 		/* If the header was expanded, headroom might be too
 		 * small for L2 header to come, expand as needed.
 		 */
-		ret = xmit_check_hhlen(skb);
+		ret = xmit_check_hhlen(skb, hh_len);
 		if (unlikely(ret))
 			return ret;
 
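A hedged sketch of the kind of lwt xmit program that triggered the crash: bpf_skb_set_tunnel_key() replaces the skb's dst with a metadata_dst that has no real device, so reading hard_header_len through skb_dst() after the program ran dereferenced invalid data. Caching hh_len before running the program (as in the hunks above) avoids this. Section name, addresses and tunnel parameters below are illustrative.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("lwt_xmit")
int set_tunnel(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key = {};
	int ret;

	key.remote_ipv4 = 0xac100164;	/* 172.16.1.100, example endpoint */
	key.tunnel_id = 2;
	key.tunnel_tos = 0;
	key.tunnel_ttl = 64;

	/* After this call skb_dst(skb) is a metadata_dst with no real dev. */
	ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
				     BPF_F_ZERO_CSUM_TX);
	if (ret < 0)
		return BPF_DROP;

	return BPF_LWT_REROUTE;
}

char _license[] SEC("license") = "GPL";

Such a program would typically be attached via a route, along the lines of "ip route add ... encap bpf xmit obj prog.o section lwt_xmit", rerouting matching packets to a collect_md tunnel device.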
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -639,7 +639,7 @@ static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
 	if (sk_can_busy_loop(sk))
 		sk_busy_loop(sk, 1); /* only support non-blocking sockets */
 
-	if (xsk_no_wakeup(sk))
+	if (xs->zc && xsk_no_wakeup(sk))
 		return 0;
 
 	pool = xs->pool;
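From user space, the copy-mode + busy-poll combination that this one-liner repairs looks roughly like the sketch below (socket option values are guarded for older headers; fd is assumed to be an AF_XDP socket bound in copy mode, and the numbers are illustrative). In copy mode the kernel only transmits from sendmsg context, so before the change xsk_no_wakeup() suppressed the sendto() kick entirely and the l2fwd workload stalled after the first batch.

#include <sys/types.h>
#include <sys/socket.h>

#ifndef SO_PREFER_BUSY_POLL
#define SO_PREFER_BUSY_POLL 69
#endif
#ifndef SO_BUSY_POLL_BUDGET
#define SO_BUSY_POLL_BUDGET 70
#endif

static int enable_busy_poll(int fd)
{
	int one = 1, usecs = 20, budget = 64;

	if (setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL, &one, sizeof(one)))
		return -1;
	if (setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &usecs, sizeof(usecs)))
		return -1;
	return setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL_BUDGET,
			  &budget, sizeof(budget));
}

/* In copy mode every TX kick goes through sendto(); this is the call the
 * kernel used to short-circuit when busy polling was enabled.
 */
static void kick_tx(int fd)
{
	sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
}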
@@ -967,6 +967,19 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 
 			xp_get_pool(umem_xs->pool);
 			xs->pool = umem_xs->pool;
+
+			/* If underlying shared umem was created without Tx
+			 * ring, allocate Tx descs array that Tx batching API
+			 * utilizes
+			 */
+			if (xs->tx && !xs->pool->tx_descs) {
+				err = xp_alloc_tx_descs(xs->pool, xs);
+				if (err) {
+					xp_put_pool(xs->pool);
+					sockfd_put(sock);
+					goto out_unlock;
+				}
+			}
 		}
 
 		xdp_get_umem(umem_xs->umem);
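The trigger for this hunk, sketched from user space under stated assumptions (umem registration, ring setup and most error handling elided; ifindex/queue values illustrative): the socket owning the umem is bound with only an RX ring, then a second socket binds with XDP_SHARED_UMEM and a TX ring, so the shared pool used to reach sendmsg with no tx_descs array.

#include <linux/if_xdp.h>
#include <sys/socket.h>

#ifndef SOL_XDP
#define SOL_XDP 283
#endif

static int bind_shared_tx(int rx_only_fd, int ifindex, int queue_id)
{
	struct sockaddr_xdp sxdp = {
		.sxdp_family = AF_XDP,
		.sxdp_ifindex = ifindex,
		.sxdp_queue_id = queue_id,
		.sxdp_flags = XDP_SHARED_UMEM,
		.sxdp_shared_umem_fd = rx_only_fd, /* reuse first socket's umem */
	};
	int ring_size = 2048;
	int fd = socket(AF_XDP, SOCK_RAW, 0);

	if (fd < 0)
		return -1;

	/* This socket gets only a TX ring; the fix makes the shared pool
	 * allocate tx_descs at bind time instead of crashing in sendmsg().
	 */
	if (setsockopt(fd, SOL_XDP, XDP_TX_RING, &ring_size, sizeof(ring_size)))
		return -1;

	if (bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp)))
		return -1;
	return fd;
}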
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -42,6 +42,16 @@ void xp_destroy(struct xsk_buff_pool *pool)
 	kvfree(pool);
 }
 
+int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs)
+{
+	pool->tx_descs = kvcalloc(xs->tx->nentries, sizeof(*pool->tx_descs),
+				  GFP_KERNEL);
+	if (!pool->tx_descs)
+		return -ENOMEM;
+
+	return 0;
+}
+
 struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
 						struct xdp_umem *umem)
 {
@@ -59,11 +69,9 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
 	if (!pool->heads)
 		goto out;
 
-	if (xs->tx) {
-		pool->tx_descs = kcalloc(xs->tx->nentries, sizeof(*pool->tx_descs), GFP_KERNEL);
-		if (!pool->tx_descs)
+	if (xs->tx)
+		if (xp_alloc_tx_descs(pool, xs))
 			goto out;
-	}
 
 	pool->chunk_mask = ~((u64)umem->chunk_size - 1);
 	pool->addrs_cnt = umem->size;