From 5f5486b620cd43b16a1787ef92b9bc21bd72ef2e Mon Sep 17 00:00:00 2001 From: Jeremy Sowden Date: Wed, 26 Apr 2023 22:50:32 +0100 Subject: [PATCH 01/79] selftests/bpf: Fix pkg-config call building sign-file When building sign-file, the call to get the CFLAGS for libcrypto is missing white-space between `pkg-config` and `--cflags`: $(shell $(HOSTPKG_CONFIG)--cflags libcrypto 2> /dev/null) Removing the redirection of stderr, we see: $ make -C tools/testing/selftests/bpf sign-file make: Entering directory '[...]/tools/testing/selftests/bpf' make: pkg-config--cflags: No such file or directory SIGN-FILE sign-file make: Leaving directory '[...]/tools/testing/selftests/bpf' Add the missing space. Fixes: fc97590668ae ("selftests/bpf: Add test for bpf_verify_pkcs7_signature() kfunc") Signed-off-by: Jeremy Sowden Signed-off-by: Daniel Borkmann Reviewed-by: Roberto Sassu Link: https://lore.kernel.org/bpf/20230426215032.415792-1-jeremy@azazel.net Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index c49e5403ad0e..28d2c77262be 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -197,7 +197,7 @@ $(OUTPUT)/urandom_read: urandom_read.c urandom_read_aux.c $(OUTPUT)/liburandom_r $(OUTPUT)/sign-file: ../../../../scripts/sign-file.c $(call msg,SIGN-FILE,,$@) - $(Q)$(CC) $(shell $(HOSTPKG_CONFIG)--cflags libcrypto 2> /dev/null) \ + $(Q)$(CC) $(shell $(HOSTPKG_CONFIG) --cflags libcrypto 2> /dev/null) \ $< -o $@ \ $(shell $(HOSTPKG_CONFIG) --libs libcrypto 2> /dev/null || echo -lcrypto) From e1505c1cc8d527fcc5bcaf9c1ad82eed817e3e10 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 5 May 2023 14:58:36 -0700 Subject: [PATCH 02/79] bpf: netdev: init the offload table earlier Some netdevices may get unregistered before late_initcall(), we have to move the hashtable init earlier. Fixes: f1fc43d03946 ("bpf: Move offload initialization into late_initcall") Closes: https://bugzilla.kernel.org/show_bug.cgi?id=217399 Signed-off-by: Jakub Kicinski Acked-by: Stanislav Fomichev Link: https://lore.kernel.org/r/20230505215836.491485-1-kuba@kernel.org Signed-off-by: Alexei Starovoitov --- kernel/bpf/offload.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c index d9c9f45e3529..8a26cd8814c1 100644 --- a/kernel/bpf/offload.c +++ b/kernel/bpf/offload.c @@ -859,4 +859,4 @@ static int __init bpf_offload_init(void) return rhashtable_init(&offdevs, &offdevs_params); } -late_initcall(bpf_offload_init); +core_initcall(bpf_offload_init); From a820ca1a739b7e3ee0ddb48bdfa3c09dc7cd4302 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 15 May 2023 17:17:18 -0700 Subject: [PATCH 03/79] samples/bpf: Drop unnecessary fallthrough __fallthrough is now not supported. Instead of renaming it to now-canonical ([0]) fallthrough pseudo-keyword, just get rid of it and equate 'h' case to default case, as both emit usage information and succeed. 
[0] https://www.kernel.org/doc/html/latest/process/deprecated.html?highlight=fallthrough#implicit-switch-case-fall-through Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20230516001718.317177-1-andrii@kernel.org --- samples/bpf/hbm.c | 1 - 1 file changed, 1 deletion(-) diff --git a/samples/bpf/hbm.c b/samples/bpf/hbm.c index 6448b7826107..bf66277115e2 100644 --- a/samples/bpf/hbm.c +++ b/samples/bpf/hbm.c @@ -498,7 +498,6 @@ int main(int argc, char **argv) "Option -%c requires an argument.\n\n", optopt); case 'h': - __fallthrough; default: Usage(); return 0; From 7e01c7f7046efc2c7c192c3619db43292b98e997 Mon Sep 17 00:00:00 2001 From: Tudor Ambarus Date: Wed, 17 May 2023 13:38:08 +0000 Subject: [PATCH 04/79] net: cdc_ncm: Deal with too low values of dwNtbOutMaxSize Currently in cdc_ncm_check_tx_max(), if dwNtbOutMaxSize is lower than the calculated "min" value, but greater than zero, the logic sets tx_max to dwNtbOutMaxSize. This is then used to allocate a new SKB in cdc_ncm_fill_tx_frame() where all the data is handled. For small values of dwNtbOutMaxSize the memory allocated during alloc_skb(dwNtbOutMaxSize, GFP_ATOMIC) will have the same size, due to how size is aligned at alloc time: size = SKB_DATA_ALIGN(size); size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); Thus we hit the same bug that we tried to squash with commit 2be6d4d16a084 ("net: cdc_ncm: Allow for dwNtbOutMaxSize to be unset or zero") Low values of dwNtbOutMaxSize do not cause an issue presently because at alloc_skb() time more memory (512b) is allocated than required for the SKB headers alone (320b), leaving some space (512b - 320b = 192b) for CDC data (172b). However, if more elements (for example 3 x u64 = [24b]) were added to one of the SKB header structs, say 'struct skb_shared_info', increasing its original size (320b [320b aligned]) to something larger (344b [384b aligned]), then suddenly the CDC data (172b) no longer fits in the spare SKB data area (512b - 384b = 128b). Consequently the SKB bounds checking semantics fails and panics: skbuff: skb_over_panic: text:ffffffff831f755b len:184 put:172 head:ffff88811f1c6c00 data:ffff88811f1c6c00 tail:0xb8 end:0x80 dev: ------------[ cut here ]------------ kernel BUG at net/core/skbuff.c:113! invalid opcode: 0000 [#1] PREEMPT SMP KASAN CPU: 0 PID: 57 Comm: kworker/0:2 Not tainted 5.15.106-syzkaller-00249-g19c0ed55a470 #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 04/14/2023 Workqueue: mld mld_ifc_work RIP: 0010:skb_panic net/core/skbuff.c:113 [inline] RIP: 0010:skb_over_panic+0x14c/0x150 net/core/skbuff.c:118 [snip] Call Trace: skb_put+0x151/0x210 net/core/skbuff.c:2047 skb_put_zero include/linux/skbuff.h:2422 [inline] cdc_ncm_ndp16 drivers/net/usb/cdc_ncm.c:1131 [inline] cdc_ncm_fill_tx_frame+0x11ab/0x3da0 drivers/net/usb/cdc_ncm.c:1308 cdc_ncm_tx_fixup+0xa3/0x100 Deal with too low values of dwNtbOutMaxSize, clamp it in the range [USB_CDC_NCM_NTB_MIN_OUT_SIZE, CDC_NCM_NTB_MAX_SIZE_TX]. We ensure enough data space is allocated to handle CDC data by making sure dwNtbOutMaxSize is not smaller than USB_CDC_NCM_NTB_MIN_OUT_SIZE. 
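To see why every small dwNtbOutMaxSize ends up with the same allocation, the alignment arithmetic above can be checked with a standalone sketch; the 64-byte cache-line rounding, the 512b bucket and the 320b/344b skb_shared_info sizes are the figures assumed in this message, not values queried from a running kernel:

#include <stdio.h>

/* Model SKB_DATA_ALIGN() as rounding up to a 64-byte cache line. */
#define CACHE_LINE 64
#define SKB_DATA_ALIGN(x) (((x) + CACHE_LINE - 1) & ~(CACHE_LINE - 1))

int main(void)
{
	const unsigned int bucket = 512;   /* memory actually allocated */
	const unsigned int cdc_data = 172; /* CDC data that must fit */
	unsigned int shinfo;

	for (shinfo = 320; shinfo <= 344; shinfo += 24) {
		int spare = bucket - SKB_DATA_ALIGN(shinfo);

		printf("shinfo=%ub aligned=%ub spare=%db: %s\n",
		       shinfo, SKB_DATA_ALIGN(shinfo), spare,
		       spare >= (int)cdc_data ? "fits" : "skb_over_panic");
	}
	return 0;
}

It prints spare=192b (fits) for the 320b layout and spare=128b (skb_over_panic) for the grown 344b layout, matching the numbers above.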
Fixes: 289507d3364f ("net: cdc_ncm: use sysfs for rx/tx aggregation tuning") Cc: stable@vger.kernel.org Reported-by: syzbot+9f575a1f15fc0c01ed69@syzkaller.appspotmail.com Link: https://syzkaller.appspot.com/bug?extid=b982f1059506db48409d Link: https://lore.kernel.org/all/20211202143437.1411410-1-lee.jones@linaro.org/ Signed-off-by: Tudor Ambarus Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230517133808.1873695-2-tudor.ambarus@linaro.org Signed-off-by: Jakub Kicinski --- drivers/net/usb/cdc_ncm.c | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 6ce8f4f0c70e..db05622f1f70 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -181,9 +181,12 @@ static u32 cdc_ncm_check_tx_max(struct usbnet *dev, u32 new_tx) else min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth32); - max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize)); - if (max == 0) + if (le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) == 0) max = CDC_NCM_NTB_MAX_SIZE_TX; /* dwNtbOutMaxSize not set */ + else + max = clamp_t(u32, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize), + USB_CDC_NCM_NTB_MIN_OUT_SIZE, + CDC_NCM_NTB_MAX_SIZE_TX); /* some devices set dwNtbOutMaxSize too low for the above default */ min = min(min, max); @@ -1244,6 +1247,9 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) * further. */ if (skb_out == NULL) { + /* If even the smallest allocation fails, abort. */ + if (ctx->tx_curr_size == USB_CDC_NCM_NTB_MIN_OUT_SIZE) + goto alloc_failed; ctx->tx_low_mem_max_cnt = min(ctx->tx_low_mem_max_cnt + 1, (unsigned)CDC_NCM_LOW_MEM_MAX_CNT); ctx->tx_low_mem_val = ctx->tx_low_mem_max_cnt; @@ -1262,13 +1268,8 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) skb_out = alloc_skb(ctx->tx_curr_size, GFP_ATOMIC); /* No allocation possible so we will abort */ - if (skb_out == NULL) { - if (skb != NULL) { - dev_kfree_skb_any(skb); - dev->net->stats.tx_dropped++; - } - goto exit_no_skb; - } + if (!skb_out) + goto alloc_failed; ctx->tx_low_mem_val--; } if (ctx->is_ndp16) { @@ -1461,6 +1462,11 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) return skb_out; +alloc_failed: + if (skb) { + dev_kfree_skb_any(skb); + dev->net->stats.tx_dropped++; + } exit_no_skb: /* Start timer, if there is a remaining non-empty skb */ if (ctx->tx_curr_skb != NULL && n > 0) From b3a03b540e3cf62a255213d084d76d71c02793d5 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 16 May 2023 18:50:36 -0700 Subject: [PATCH 05/79] tls: rx: device: fix checking decryption status skb->len covers the entire skb, including the frag_list. In fact we're guaranteed that rxm->full_len <= skb->len, so since the change under Fixes we were not checking decrypt status of any skb but the first. Note that the skb_pagelen() added here may feel a bit costly, but it's removed by subsequent fixes, anyway. Reported-by: Tariq Toukan Fixes: 86b259f6f888 ("tls: rx: device: bound the frag walk") Tested-by: Shai Amiram Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller --- net/tls/tls_device.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index a7cc4f9faac2..3b87c7b04ac8 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -1012,7 +1012,7 @@ int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx) struct sk_buff *skb_iter; int left; - left = rxm->full_len - skb->len; + left = rxm->full_len + rxm->offset - skb_pagelen(skb); /* Check if all the data is decrypted already */ skb_iter = skb_shinfo(skb)->frag_list; while (skb_iter && left > 0) { From 210620ae44a83f25220450bbfcc22e6fe986b25f Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 16 May 2023 18:50:37 -0700 Subject: [PATCH 06/79] tls: rx: strp: set the skb->len of detached / CoW'ed skbs alloc_skb_with_frags() fills in page frag sizes but does not set skb->len and skb->data_len. Set those correctly otherwise device offload will most likely generate an empty skb and hit the BUG() at the end of __skb_nsg(). Fixes: 84c61fe1a75b ("tls: rx: do not use the standard strparser") Tested-by: Shai Amiram Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- net/tls/tls_strp.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c index 955ac3e0bf4d..24016c865e00 100644 --- a/net/tls/tls_strp.c +++ b/net/tls/tls_strp.c @@ -56,6 +56,8 @@ static struct sk_buff *tls_strp_msg_make_copy(struct tls_strparser *strp) offset += skb_frag_size(frag); } + skb->len = strp->stm.full_len; + skb->data_len = strp->stm.full_len; skb_copy_header(skb, strp->anchor); rxm = strp_msg(skb); rxm->offset = 0; From 14c4be92ebb3e36e392aa9dd8f314038a9f96f3c Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 16 May 2023 18:50:38 -0700 Subject: [PATCH 07/79] tls: rx: strp: force mixed decrypted records into copy mode If a record is partially decrypted we'll have to CoW it, anyway, so go into copy mode and allocate a writable skb right away. This will make subsequent fix simpler because we won't have to teach tls_strp_msg_make_copy() how to copy skbs while preserving decrypt status. Tested-by: Shai Amiram Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 10 ++++++++++ net/tls/tls_strp.c | 16 +++++++++++----- 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 738776ab8838..0b40417457cd 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1587,6 +1587,16 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from) to->l4_hash = from->l4_hash; }; +static inline int skb_cmp_decrypted(const struct sk_buff *skb1, + const struct sk_buff *skb2) +{ +#ifdef CONFIG_TLS_DEVICE + return skb2->decrypted - skb1->decrypted; +#else + return 0; +#endif +} + static inline void skb_copy_decrypted(struct sk_buff *to, const struct sk_buff *from) { diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c index 24016c865e00..2b6fa9855999 100644 --- a/net/tls/tls_strp.c +++ b/net/tls/tls_strp.c @@ -317,15 +317,19 @@ static int tls_strp_read_copy(struct tls_strparser *strp, bool qshort) return 0; } -static bool tls_strp_check_no_dup(struct tls_strparser *strp) +static bool tls_strp_check_queue_ok(struct tls_strparser *strp) { unsigned int len = strp->stm.offset + strp->stm.full_len; - struct sk_buff *skb; + struct sk_buff *first, *skb; u32 seq; - skb = skb_shinfo(strp->anchor)->frag_list; - seq = TCP_SKB_CB(skb)->seq; + first = skb_shinfo(strp->anchor)->frag_list; + skb = first; + seq = TCP_SKB_CB(first)->seq; + /* Make sure there's no duplicate data in the queue, + * and the decrypted status matches. + */ while (skb->len < len) { seq += skb->len; len -= skb->len; @@ -333,6 +337,8 @@ static bool tls_strp_check_no_dup(struct tls_strparser *strp) if (TCP_SKB_CB(skb)->seq != seq) return false; + if (skb_cmp_decrypted(first, skb)) + return false; } return true; @@ -413,7 +419,7 @@ static int tls_strp_read_sock(struct tls_strparser *strp) return tls_strp_read_copy(strp, true); } - if (!tls_strp_check_no_dup(strp)) + if (!tls_strp_check_queue_ok(strp)) return tls_strp_read_copy(strp, false); strp->msg_ready = 1; From 8b0c0dc9fbbd01e58a573a41c38885f9e4c17696 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 16 May 2023 18:50:39 -0700 Subject: [PATCH 08/79] tls: rx: strp: fix determining record length in copy mode We call tls_rx_msg_size(skb) before doing skb->len += chunk. So the tls_rx_msg_size() code will see old skb->len, most likely leading to an over-read. Worst case we will over read an entire record, next iteration will try to trim the skb but may end up turning frag len negative or discarding the subsequent record (since we already told TCP we've read it during previous read but now we'll trim it out of the skb). Fixes: 84c61fe1a75b ("tls: rx: do not use the standard strparser") Tested-by: Shai Amiram Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller --- net/tls/tls_strp.c | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c index 2b6fa9855999..e2e48217e7ac 100644 --- a/net/tls/tls_strp.c +++ b/net/tls/tls_strp.c @@ -210,19 +210,28 @@ static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb, skb_frag_size(frag), chunk)); - sz = tls_rx_msg_size(strp, strp->anchor); + skb->len += chunk; + skb->data_len += chunk; + skb_frag_size_add(frag, chunk); + + sz = tls_rx_msg_size(strp, skb); if (sz < 0) { desc->error = sz; return 0; } /* We may have over-read, sz == 0 is guaranteed under-read */ - if (sz > 0) - chunk = min_t(size_t, chunk, sz - skb->len); + if (unlikely(sz && sz < skb->len)) { + int over = skb->len - sz; + + WARN_ON_ONCE(over > chunk); + skb->len -= over; + skb->data_len -= over; + skb_frag_size_add(frag, -over); + + chunk -= over; + } - skb->len += chunk; - skb->data_len += chunk; - skb_frag_size_add(frag, chunk); frag++; len -= chunk; offset += chunk; From c1c607b1e5d5477d82ca6a86a05a4f10907b33ee Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 16 May 2023 18:50:40 -0700 Subject: [PATCH 09/79] tls: rx: strp: factor out copying skb data We'll need to copy input skbs individually in the next patch. Factor that code out (without assuming we're copying a full record). Tested-by: Shai Amiram Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- net/tls/tls_strp.c | 33 +++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c index e2e48217e7ac..61fbf84baf9e 100644 --- a/net/tls/tls_strp.c +++ b/net/tls/tls_strp.c @@ -34,31 +34,44 @@ static void tls_strp_anchor_free(struct tls_strparser *strp) strp->anchor = NULL; } -/* Create a new skb with the contents of input copied to its page frags */ -static struct sk_buff *tls_strp_msg_make_copy(struct tls_strparser *strp) +static struct sk_buff * +tls_strp_skb_copy(struct tls_strparser *strp, struct sk_buff *in_skb, + int offset, int len) { - struct strp_msg *rxm; struct sk_buff *skb; - int i, err, offset; + int i, err; - skb = alloc_skb_with_frags(0, strp->stm.full_len, TLS_PAGE_ORDER, + skb = alloc_skb_with_frags(0, len, TLS_PAGE_ORDER, &err, strp->sk->sk_allocation); if (!skb) return NULL; - offset = strp->stm.offset; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - WARN_ON_ONCE(skb_copy_bits(strp->anchor, offset, + WARN_ON_ONCE(skb_copy_bits(in_skb, offset, skb_frag_address(frag), skb_frag_size(frag))); offset += skb_frag_size(frag); } - skb->len = strp->stm.full_len; - skb->data_len = strp->stm.full_len; - skb_copy_header(skb, strp->anchor); + skb->len = len; + skb->data_len = len; + skb_copy_header(skb, in_skb); + return skb; +} + +/* Create a new skb with the contents of input copied to its page frags */ +static struct sk_buff *tls_strp_msg_make_copy(struct tls_strparser *strp) +{ + struct strp_msg *rxm; + struct sk_buff *skb; + + skb = tls_strp_skb_copy(strp, strp->anchor, strp->stm.offset, + strp->stm.full_len); + if (!skb) + return NULL; + rxm = strp_msg(skb); rxm->offset = 0; return skb; From eca9bfafee3a0487e59c59201ae14c7594ba940a Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 16 May 2023 18:50:41 -0700 Subject: [PATCH 10/79] tls: rx: strp: preserve decryption status of skbs when needed When receive buffer is small we try to copy out the data from TCP into a skb maintained by TLS to prevent 
connection from stalling. Unfortunately if a single record is made up of a mix of decrypted and non-decrypted skbs combining them into a single skb leads to loss of decryption status, resulting in decryption errors or data corruption. Similarly when trying to use TCP receive queue directly we need to make sure that all the skbs within the record have the same status. If we don't the mixed status will be detected correctly but we'll CoW the anchor, again collapsing it into a single paged skb without decrypted status preserved. So the "fixup" code will not know which parts of skb to re-encrypt. Fixes: 84c61fe1a75b ("tls: rx: do not use the standard strparser") Tested-by: Shai Amiram Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- include/net/tls.h | 1 + net/tls/tls.h | 5 ++ net/tls/tls_device.c | 20 +++----- net/tls/tls_strp.c | 117 ++++++++++++++++++++++++++++++++++++------- 4 files changed, 113 insertions(+), 30 deletions(-) diff --git a/include/net/tls.h b/include/net/tls.h index 6056ce5a2aa5..596595c4b1af 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -126,6 +126,7 @@ struct tls_strparser { u32 mark : 8; u32 stopped : 1; u32 copy_mode : 1; + u32 mixed_decrypted : 1; u32 msg_ready : 1; struct strp_msg stm; diff --git a/net/tls/tls.h b/net/tls/tls.h index 804c3880d028..0672acab2773 100644 --- a/net/tls/tls.h +++ b/net/tls/tls.h @@ -167,6 +167,11 @@ static inline bool tls_strp_msg_ready(struct tls_sw_context_rx *ctx) return ctx->strp.msg_ready; } +static inline bool tls_strp_msg_mixed_decrypted(struct tls_sw_context_rx *ctx) +{ + return ctx->strp.mixed_decrypted; +} + #ifdef CONFIG_TLS_DEVICE int tls_device_init(void); void tls_device_cleanup(void); diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 3b87c7b04ac8..bf69c9d6d06c 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -1007,20 +1007,14 @@ int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx) struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx); struct sk_buff *skb = tls_strp_msg(sw_ctx); struct strp_msg *rxm = strp_msg(skb); - int is_decrypted = skb->decrypted; - int is_encrypted = !is_decrypted; - struct sk_buff *skb_iter; - int left; + int is_decrypted, is_encrypted; - left = rxm->full_len + rxm->offset - skb_pagelen(skb); - /* Check if all the data is decrypted already */ - skb_iter = skb_shinfo(skb)->frag_list; - while (skb_iter && left > 0) { - is_decrypted &= skb_iter->decrypted; - is_encrypted &= !skb_iter->decrypted; - - left -= skb_iter->len; - skb_iter = skb_iter->next; + if (!tls_strp_msg_mixed_decrypted(sw_ctx)) { + is_decrypted = skb->decrypted; + is_encrypted = !is_decrypted; + } else { + is_decrypted = 0; + is_encrypted = 0; } trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len, diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c index 61fbf84baf9e..da95abbb7ea3 100644 --- a/net/tls/tls_strp.c +++ b/net/tls/tls_strp.c @@ -29,7 +29,8 @@ static void tls_strp_anchor_free(struct tls_strparser *strp) struct skb_shared_info *shinfo = skb_shinfo(strp->anchor); DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1); - shinfo->frag_list = NULL; + if (!strp->copy_mode) + shinfo->frag_list = NULL; consume_skb(strp->anchor); strp->anchor = NULL; } @@ -195,22 +196,22 @@ static void tls_strp_flush_anchor_copy(struct tls_strparser *strp) for (i = 0; i < shinfo->nr_frags; i++) __skb_frag_unref(&shinfo->frags[i], false); shinfo->nr_frags = 0; + if (strp->copy_mode) { + kfree_skb_list(shinfo->frag_list); + 
shinfo->frag_list = NULL; + } strp->copy_mode = 0; + strp->mixed_decrypted = 0; } -static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb, - unsigned int offset, size_t in_len) +static int tls_strp_copyin_frag(struct tls_strparser *strp, struct sk_buff *skb, + struct sk_buff *in_skb, unsigned int offset, + size_t in_len) { - struct tls_strparser *strp = (struct tls_strparser *)desc->arg.data; - struct sk_buff *skb; - skb_frag_t *frag; size_t len, chunk; + skb_frag_t *frag; int sz; - if (strp->msg_ready) - return 0; - - skb = strp->anchor; frag = &skb_shinfo(skb)->frags[skb->len / PAGE_SIZE]; len = in_len; @@ -228,10 +229,8 @@ static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb, skb_frag_size_add(frag, chunk); sz = tls_rx_msg_size(strp, skb); - if (sz < 0) { - desc->error = sz; - return 0; - } + if (sz < 0) + return sz; /* We may have over-read, sz == 0 is guaranteed under-read */ if (unlikely(sz && sz < skb->len)) { @@ -271,15 +270,99 @@ static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb, offset += chunk; } - if (strp->stm.full_len == skb->len) { +read_done: + return in_len - len; +} + +static int tls_strp_copyin_skb(struct tls_strparser *strp, struct sk_buff *skb, + struct sk_buff *in_skb, unsigned int offset, + size_t in_len) +{ + struct sk_buff *nskb, *first, *last; + struct skb_shared_info *shinfo; + size_t chunk; + int sz; + + if (strp->stm.full_len) + chunk = strp->stm.full_len - skb->len; + else + chunk = TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE; + chunk = min(chunk, in_len); + + nskb = tls_strp_skb_copy(strp, in_skb, offset, chunk); + if (!nskb) + return -ENOMEM; + + shinfo = skb_shinfo(skb); + if (!shinfo->frag_list) { + shinfo->frag_list = nskb; + nskb->prev = nskb; + } else { + first = shinfo->frag_list; + last = first->prev; + last->next = nskb; + first->prev = nskb; + } + + skb->len += chunk; + skb->data_len += chunk; + + if (!strp->stm.full_len) { + sz = tls_rx_msg_size(strp, skb); + if (sz < 0) + return sz; + + /* We may have over-read, sz == 0 is guaranteed under-read */ + if (unlikely(sz && sz < skb->len)) { + int over = skb->len - sz; + + WARN_ON_ONCE(over > chunk); + skb->len -= over; + skb->data_len -= over; + __pskb_trim(nskb, nskb->len - over); + + chunk -= over; + } + + strp->stm.full_len = sz; + } + + return chunk; +} + +static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb, + unsigned int offset, size_t in_len) +{ + struct tls_strparser *strp = (struct tls_strparser *)desc->arg.data; + struct sk_buff *skb; + int ret; + + if (strp->msg_ready) + return 0; + + skb = strp->anchor; + if (!skb->len) + skb_copy_decrypted(skb, in_skb); + else + strp->mixed_decrypted |= !!skb_cmp_decrypted(skb, in_skb); + + if (IS_ENABLED(CONFIG_TLS_DEVICE) && strp->mixed_decrypted) + ret = tls_strp_copyin_skb(strp, skb, in_skb, offset, in_len); + else + ret = tls_strp_copyin_frag(strp, skb, in_skb, offset, in_len); + if (ret < 0) { + desc->error = ret; + ret = 0; + } + + if (strp->stm.full_len && strp->stm.full_len == skb->len) { desc->count = 0; strp->msg_ready = 1; tls_rx_msg_ready(strp); } -read_done: - return in_len - len; + return ret; } static int tls_strp_read_copyin(struct tls_strparser *strp) From 74836ec828fe17b63f2006fdbf53311d691396bf Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 16 May 2023 18:50:42 -0700 Subject: [PATCH 11/79] tls: rx: strp: don't use GFP_KERNEL in softirq context When receive buffer is small, or the TCP rx queue looks too complicated to bother using it directly - we 
allocate a new skb and copy data into it. We already use sk->sk_allocation... but nothing actually sets it to GFP_ATOMIC on the ->sk_data_ready() path. Users of HW offload are far more likely to experience problems due to scheduling while atomic. "Copy mode" is very rarely triggered with SW crypto. Fixes: 84c61fe1a75b ("tls: rx: do not use the standard strparser") Tested-by: Shai Amiram Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- net/tls/tls_sw.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 635b8bf6b937..6e6a7c37d685 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -2304,10 +2304,14 @@ static void tls_data_ready(struct sock *sk) struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); struct sk_psock *psock; + gfp_t alloc_save; trace_sk_data_ready(sk); + alloc_save = sk->sk_allocation; + sk->sk_allocation = GFP_ATOMIC; tls_strp_data_ready(&ctx->strp); + sk->sk_allocation = alloc_save; psock = sk_psock_get(sk); if (psock) { From afbed3f74830163f9559579dee382cac3cff82da Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 16 May 2023 18:59:35 -0700 Subject: [PATCH 12/79] net/mlx5e: do as little as possible in napi poll when budget is 0 NAPI gets called with budget of 0 from netpoll, which has interrupts disabled. We should try to free some space on Tx rings and nothing else. Specifically do not try to handle XDP TX or try to refill Rx buffers - we can't use the page pool from IRQ context. Don't check if IRQs moved, either, that makes no sense in netpoll. Netpoll calls _all_ the rings from whatever CPU it happens to be invoked on. In general do as little as possible, the work quickly adds up when there's tens of rings to poll. The immediate stack trace I was seeing is: __do_softirq+0xd1/0x2c0 __local_bh_enable_ip+0xc7/0x120 page_pool_put_defragged_page+0x267/0x320 mlx5e_free_xdpsq_desc+0x99/0xd0 mlx5e_poll_xdpsq_cq+0x138/0x3b0 mlx5e_napi_poll+0xc3/0x8b0 netpoll_poll_dev+0xce/0x150 AFAIU page pool takes a BH lock, releases it and since BH is now enabled tries to run softirqs. Reviewed-by: Tariq Toukan Fixes: 60bbf7eeef10 ("mlx5: use page_pool for xdp_return_frame call") Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlx5/core/en_txrx.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index a50bfda18e96..fbb2d963fb7e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -161,20 +161,22 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) } } + /* budget=0 means we may be in IRQ context, do as little as possible */ + if (unlikely(!budget)) + goto out; + busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq); if (c->xdp) busy |= mlx5e_poll_xdpsq_cq(&c->rq_xdpsq.cq); - if (likely(budget)) { /* budget=0 means: don't poll rx rings */ - if (xsk_open) - work_done = mlx5e_poll_rx_cq(&xskrq->cq, budget); + if (xsk_open) + work_done = mlx5e_poll_rx_cq(&xskrq->cq, budget); - if (likely(budget - work_done)) - work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done); + if (likely(budget - work_done)) + work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done); - busy |= work_done == budget; - } + busy |= work_done == budget; mlx5e_poll_ico_cq(&c->icosq.cq); if (mlx5e_poll_ico_cq(&c->async_icosq.cq)) From d226b1df361988f885c298737d6019c863a25f26 Mon Sep 17 00:00:00 2001 From: Po-Hsu Lin Date: Thu, 18 May 2023 12:37:59 +0800 Subject: [PATCH 13/79] selftests: fib_tests: mute cleanup error message In the end of the test, there will be an error message induced by the `ip netns del ns1` command in cleanup() Tests passed: 201 Tests failed: 0 Cannot remove namespace file "/run/netns/ns1": No such file or directory This can even be reproduced with just `./fib_tests.sh -h` as we're calling cleanup() on exit. Redirect the error message to /dev/null to mute it. V2: Update commit message and fixes tag. V3: resubmit due to missing netdev ML in V2 Fixes: b60417a9f2b8 ("selftest: fib_tests: Always cleanup before exit") Signed-off-by: Po-Hsu Lin Reviewed-by: Ido Schimmel Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- tools/testing/selftests/net/fib_tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh index 7da8ec838c63..35d89dfa6f11 100755 --- a/tools/testing/selftests/net/fib_tests.sh +++ b/tools/testing/selftests/net/fib_tests.sh @@ -68,7 +68,7 @@ setup() cleanup() { $IP link del dev dummy0 &> /dev/null - ip netns del ns1 + ip netns del ns1 &> /dev/null ip netns del ns2 &> /dev/null } From 35112271672ae98f45df7875244a4e33aa215e31 Mon Sep 17 00:00:00 2001 From: Wen Gu Date: Thu, 18 May 2023 13:14:55 +0800 Subject: [PATCH 14/79] net/smc: Reset connection when trying to use SMCRv2 fails. We found a crash when using SMCRv2 with 2 Mellanox ConnectX-4. 
It can be reproduced by:

 - smc_run nginx
 - smc_run wrk -t 32 -c 500 -d 30 http://:

 BUG: kernel NULL pointer dereference, address: 0000000000000014
 #PF: supervisor read access in kernel mode
 #PF: error_code(0x0000) - not-present page
 PGD 8000000108713067 P4D 8000000108713067 PUD 151127067 PMD 0
 Oops: 0000 [#1] PREEMPT SMP PTI
 CPU: 4 PID: 2441 Comm: kworker/4:249 Kdump: loaded Tainted: G W E 6.4.0-rc1+ #42
 Workqueue: smc_hs_wq smc_listen_work [smc]
 RIP: 0010:smc_clc_send_confirm_accept+0x284/0x580 [smc]
 RSP: 0018:ffffb8294b2d7c78 EFLAGS: 00010a06
 RAX: ffff8f1873238880 RBX: ffffb8294b2d7dc8 RCX: 0000000000000000
 RDX: 00000000000000b4 RSI: 0000000000000001 RDI: 0000000000b40c00
 RBP: ffffb8294b2d7db8 R08: ffff8f1815c5860c R09: 0000000000000000
 R10: 0000000000000400 R11: 0000000000000000 R12: ffff8f1846f56180
 R13: ffff8f1815c5860c R14: 0000000000000001 R15: 0000000000000001
 FS: 0000000000000000(0000) GS:ffff8f1aefd00000(0000) knlGS:0000000000000000
 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
 CR2: 0000000000000014 CR3: 00000001027a0001 CR4: 00000000003706e0
 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
 Call Trace:
  ? mlx5_ib_map_mr_sg+0xa1/0xd0 [mlx5_ib]
  ? smcr_buf_map_link+0x24b/0x290 [smc]
  ? __smc_buf_create+0x4ee/0x9b0 [smc]
  smc_clc_send_accept+0x4c/0xb0 [smc]
  smc_listen_work+0x346/0x650 [smc]
  ? __schedule+0x279/0x820
  process_one_work+0x1e5/0x3f0
  worker_thread+0x4d/0x2f0
  ? __pfx_worker_thread+0x10/0x10
  kthread+0xe5/0x120
  ? __pfx_kthread+0x10/0x10
  ret_from_fork+0x2c/0x50

During the CLC handshake, the server sequentially tries the available SMCRv2 and SMCRv1 devices in smc_listen_work(). If an SMCRv2 device is found, an SMCRv2-based link group and link will be assigned to the connection. Then, if some buffer assignment error happens later in the CLC handshake, such as an RMB registration failure, the server will give up SMCRv2 and try an SMCRv1 device instead. But the resources assigned to the connection won't be reset.

When the server tries the SMCRv1 device, the connection creation process will be executed again. Since conn->lnk has been assigned when trying SMCRv2, it will not be set to the correct SMCRv1 link in smcr_lgr_conn_assign_link(). So in such a situation, conn->lgr points to the correct SMCRv1 link group but conn->lnk points to the SMCRv2 link mistakenly. Then in smc_clc_send_confirm_accept(), conn->rmb_desc->mr[link->link_idx] will be accessed. Since link->link_idx is not correct, the related MR may not have been initialized, so the crash happens.

 | Try SMCRv2 device first
 |  |-> conn->lgr: assign existed SMCRv2 link group;
 |  |-> conn->link: assign existed SMCRv2 link (link_idx may be
 |      1 in SMC_LGR_SYMMETRIC);
 |  |-> sndbuf & RMB creation fails, quit;
 |
 | Try SMCRv1 device then
 |  |-> conn->lgr: create SMCRv1 link group and assign;
 |  |-> conn->link: keep SMCRv2 link mistakenly;
 |  |-> sndbuf & RMB creation succeed, only RMB->mr[link_idx = 0]
 |      initialized.
 |
 | Then smc_clc_send_confirm_accept() accesses
 | conn->rmb_desc->mr[conn->link->link_idx, which is 1], then crash.
 v

This patch tries to fix this by cleaning conn->lnk before assigning the link. In addition, it is better to reset the connection and clean the resources assigned if trying SMCRv2 failed in buffer creation or registration.

Fixes: e49300a6bf62 ("net/smc: add listen processing for SMC-Rv2")
Link: https://lore.kernel.org/r/20220523055056.2078994-1-liuyacan@corp.netease.com/
Signed-off-by: Wen Gu
Reviewed-by: Tony Lu
Signed-off-by: David S. Miller
--- net/smc/af_smc.c | 9 +++++++-- net/smc/smc_core.c | 1 + 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 50c38b624f77..538e9c6ec8c9 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -2000,8 +2000,10 @@ static int smc_listen_rdma_init(struct smc_sock *new_smc, return rc; /* create send buffer and rmb */ - if (smc_buf_create(new_smc, false)) + if (smc_buf_create(new_smc, false)) { + smc_conn_abort(new_smc, ini->first_contact_local); return SMC_CLC_DECL_MEM; + } return 0; } @@ -2217,8 +2219,11 @@ static void smc_find_rdma_v2_device_serv(struct smc_sock *new_smc, smcr_version = ini->smcr_version; ini->smcr_version = SMC_V2; rc = smc_listen_rdma_init(new_smc, ini); - if (!rc) + if (!rc) { rc = smc_listen_rdma_reg(new_smc, ini->first_contact_local); + if (rc) + smc_conn_abort(new_smc, ini->first_contact_local); + } if (!rc) return; ini->smcr_version = smcr_version; diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 454356771cda..3f465faf2b68 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -127,6 +127,7 @@ static int smcr_lgr_conn_assign_link(struct smc_connection *conn, bool first) int i, j; /* do link balancing */ + conn->lnk = NULL; /* reset conn->lnk first */ for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { struct smc_link *lnk = &conn->lgr->lnk[i]; From cfcb942863f6fce9266e1957a021e6c7295dee42 Mon Sep 17 00:00:00 2001 From: Alejandro Lucero Date: Thu, 18 May 2023 06:48:22 +0100 Subject: [PATCH 15/79] sfc: fix devlink info error handling Avoid early devlink info return if errors arise with MCDI commands executed for getting the required info from the device. The rationale is that some commands can fail but later ones could still give useful data. Moreover, some nvram partitions might not be present, which needs to be handled as a non-error. The specific errors are reported through system messages and if any error appears, it will be reported generically through extack. Fixes: 14743ddd2495 ("sfc: add devlink info support for ef100") Signed-off-by: Alejandro Lucero Acked-by: Martin Habets Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/efx_devlink.c | 87 ++++++++++++-------------- 1 file changed, 41 insertions(+), 46 deletions(-) diff --git a/drivers/net/ethernet/sfc/efx_devlink.c b/drivers/net/ethernet/sfc/efx_devlink.c index 381b805659d3..ef9971cbb695 100644 --- a/drivers/net/ethernet/sfc/efx_devlink.c +++ b/drivers/net/ethernet/sfc/efx_devlink.c @@ -171,9 +171,14 @@ static int efx_devlink_info_nvram_partition(struct efx_nic *efx, rc = efx_mcdi_nvram_metadata(efx, partition_type, NULL, version, NULL, 0); + + /* If the partition does not exist, that is not an error. */ + if (rc == -ENOENT) + return 0; + if (rc) { - netif_err(efx, drv, efx->net_dev, "mcdi nvram %s: failed\n", - version_name); + netif_err(efx, drv, efx->net_dev, "mcdi nvram %s: failed (rc=%d)\n", - version_name); + version_name, rc); return rc; } @@ -187,36 +192,33 @@ static int efx_devlink_info_stored_versions(struct efx_nic *efx, struct devlink_info_req *req) { - int rc; + int err; - rc = efx_devlink_info_nvram_partition(efx, req, - NVRAM_PARTITION_TYPE_BUNDLE, - DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID); - if (rc) - return rc; + /* We do not care here about the specific error but just if an error + * happened. The specific error will be reported inside the call + * through system messages, and if any error happened in any call + * below, we report it through extack.
+ */ + err = efx_devlink_info_nvram_partition(efx, req, + NVRAM_PARTITION_TYPE_BUNDLE, + DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID); - rc = efx_devlink_info_nvram_partition(efx, req, - NVRAM_PARTITION_TYPE_MC_FIRMWARE, - DEVLINK_INFO_VERSION_GENERIC_FW_MGMT); - if (rc) - return rc; + err |= efx_devlink_info_nvram_partition(efx, req, + NVRAM_PARTITION_TYPE_MC_FIRMWARE, + DEVLINK_INFO_VERSION_GENERIC_FW_MGMT); - rc = efx_devlink_info_nvram_partition(efx, req, - NVRAM_PARTITION_TYPE_SUC_FIRMWARE, - EFX_DEVLINK_INFO_VERSION_FW_MGMT_SUC); - if (rc) - return rc; + err |= efx_devlink_info_nvram_partition(efx, req, + NVRAM_PARTITION_TYPE_SUC_FIRMWARE, + EFX_DEVLINK_INFO_VERSION_FW_MGMT_SUC); - rc = efx_devlink_info_nvram_partition(efx, req, - NVRAM_PARTITION_TYPE_EXPANSION_ROM, - EFX_DEVLINK_INFO_VERSION_FW_EXPROM); - if (rc) - return rc; + err |= efx_devlink_info_nvram_partition(efx, req, + NVRAM_PARTITION_TYPE_EXPANSION_ROM, + EFX_DEVLINK_INFO_VERSION_FW_EXPROM); - rc = efx_devlink_info_nvram_partition(efx, req, - NVRAM_PARTITION_TYPE_EXPANSION_UEFI, - EFX_DEVLINK_INFO_VERSION_FW_UEFI); - return rc; + err |= efx_devlink_info_nvram_partition(efx, req, + NVRAM_PARTITION_TYPE_EXPANSION_UEFI, + EFX_DEVLINK_INFO_VERSION_FW_UEFI); + return err; } #define EFX_VER_FLAG(_f) \ @@ -587,27 +589,20 @@ static int efx_devlink_info_get(struct devlink *devlink, { struct efx_devlink *devlink_private = devlink_priv(devlink); struct efx_nic *efx = devlink_private->efx; - int rc; + int err; - /* Several different MCDI commands are used. We report first error - * through extack returning at that point. Specific error - * information via system messages. + /* Several different MCDI commands are used. We report if errors + * happened through extack. Specific error information via system + * messages inside the calls. */ - rc = efx_devlink_info_board_cfg(efx, req); - if (rc) { - NL_SET_ERR_MSG_MOD(extack, "Getting board info failed"); - return rc; - } - rc = efx_devlink_info_stored_versions(efx, req); - if (rc) { - NL_SET_ERR_MSG_MOD(extack, "Getting stored versions failed"); - return rc; - } - rc = efx_devlink_info_running_versions(efx, req); - if (rc) { - NL_SET_ERR_MSG_MOD(extack, "Getting running versions failed"); - return rc; - } + err = efx_devlink_info_board_cfg(efx, req); + + err |= efx_devlink_info_stored_versions(efx, req); + + err |= efx_devlink_info_running_versions(efx, req); + + if (err) + NL_SET_ERR_MSG_MOD(extack, "Errors when getting device info. Check system messages"); return 0; } From de678ca38861f2eb58814048076dcf95ed1b5bf9 Mon Sep 17 00:00:00 2001 From: Sunil Goutham Date: Thu, 18 May 2023 12:10:42 +0530 Subject: [PATCH 16/79] octeontx2-pf: Fix TSOv6 offload HW adds segment size to the payload length in the IPv6 header. Fix payload length to just TCP header length instead of 'TCP header size + IPv6 header size'. Fixes: 86d7476078b8 ("octeontx2-pf: TCP segmentation offload support") Signed-off-by: Sunil Goutham Signed-off-by: Ratheesh Kannoth Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index 7045fedfd73a..7af223b0a37f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -652,9 +652,7 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, htons(ext->lso_sb - skb_network_offset(skb)); } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { ext->lso_format = pfvf->hw.lso_tsov6_idx; - - ipv6_hdr(skb)->payload_len = - htons(ext->lso_sb - skb_network_offset(skb)); + ipv6_hdr(skb)->payload_len = htons(tcp_hdrlen(skb)); } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { __be16 l3_proto = vlan_get_protocol(skb); struct udphdr *udph = udp_hdr(skb); From 3be5f6cd4a52b72ae31b77171da0912829c02096 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Thu, 18 May 2023 18:41:46 +0300 Subject: [PATCH 17/79] MAINTAINERS: add myself as maintainer for enetc I would like to be copied on new patches submitted on this driver. I am relatively familiar with the code, having practically maintained it for a while. Signed-off-by: Vladimir Oltean Acked-by: Claudiu Manoil Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index 5c22c828ab46..02bfd2c39fe8 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8159,6 +8159,7 @@ F: include/linux/spi/spi-fsl-dspi.h FREESCALE ENETC ETHERNET DRIVERS M: Claudiu Manoil +M: Vladimir Oltean L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/freescale/enetc/ From 9025944fddfed5966c8f102f1fe921ab3aee2c12 Mon Sep 17 00:00:00 2001 From: Shenwei Wang Date: Thu, 18 May 2023 10:02:02 -0500 Subject: [PATCH 18/79] net: fec: add dma_wmb to ensure correct descriptor values Two dma_wmb() are added in the XDP TX path to ensure proper ordering of descriptor and buffer updates: 1. A dma_wmb() is added after updating the last BD to make sure the updates to rest of the descriptor are visible before transferring ownership to FEC. 2. A dma_wmb() is also added after updating the bdp to ensure these updates are visible before updating txq->bd.cur. 3. Start the xmit of the frame immediately right after configuring the tx descriptor. Fixes: 6d6b39f180b8 ("net: fec: add initial XDP support") Signed-off-by: Shenwei Wang Reviewed-by: Wei Fang Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec_main.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 577d94821b3e..38e5b5abe067 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3834,6 +3834,11 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep, index = fec_enet_get_bd_index(last_bdp, &txq->bd); txq->tx_skbuff[index] = NULL; + /* Make sure the updates to rest of the descriptor are performed before + * transferring ownership. + */ + dma_wmb(); + /* Send it on its way. Tell FEC it's ready, interrupt when done, * it's the last BD of the frame, and to put the CRC on the end. */ @@ -3843,8 +3848,14 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep, /* If this was the last BD in the ring, start at the beginning again. 
*/ bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd); + /* Make sure the update to bdp are performed before txq->bd.cur. */ + dma_wmb(); + txq->bd.cur = bdp; + /* Trigger transmission start */ + writel(0, txq->bd.reg_desc_active); + return 0; } @@ -3873,12 +3884,6 @@ static int fec_enet_xdp_xmit(struct net_device *dev, sent_frames++; } - /* Make sure the update to bdp and tx_skbuff are performed. */ - wmb(); - - /* Trigger transmission start */ - writel(0, txq->bd.reg_desc_active); - __netif_tx_unlock(nq); return sent_frames; From 0613d8ca9ab382caabe9ed2dceb429e9781e443f Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 18 May 2023 11:25:28 +0100 Subject: [PATCH 19/79] bpf: Fix mask generation for 32-bit narrow loads of 64-bit fields A narrow load from a 64-bit context field results in a 64-bit load followed potentially by a 64-bit right-shift and then a bitwise AND operation to extract the relevant data. In the case of a 32-bit access, an immediate mask of 0xffffffff is used to construct a 64-bit BPF_AND operation which then sign-extends the mask value and effectively acts as a glorified no-op. For example: 0: 61 10 00 00 00 00 00 00 r0 = *(u32 *)(r1 + 0) results in the following code generation for a 64-bit field: ldr x7, [x7] // 64-bit load mov x10, #0xffffffffffffffff and x7, x7, x10 Fix the mask generation so that narrow loads always perform a 32-bit AND operation: ldr x7, [x7] // 64-bit load mov w10, #0xffffffff and w7, w7, w10 Cc: Alexei Starovoitov Cc: Daniel Borkmann Cc: John Fastabend Cc: Krzesimir Nowak Cc: Andrey Ignatov Acked-by: Yonghong Song Fixes: 31fd85816dbe ("bpf: permits narrower load from bpf program context fields") Signed-off-by: Will Deacon Link: https://lore.kernel.org/r/20230518102528.1341-1-will@kernel.org Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index fbcf5a4e2fcd..5871aa78d01a 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -17033,7 +17033,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH, insn->dst_reg, shift); - insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg, + insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, (1ULL << size * 8) - 1); } } From ca1fd42e7dbfcb34890ffbf1f2f4b356776dab6f Mon Sep 17 00:00:00 2001 From: Ruihan Li Date: Wed, 3 May 2023 21:39:34 +0800 Subject: [PATCH 20/79] Bluetooth: Fix potential double free caused by hci_conn_unlink The hci_conn_unlink function is being called by hci_conn_del, which means it should not call hci_conn_del with the input parameter conn again. If it does, conn may have already been released when hci_conn_unlink returns, leading to potential UAF and double-free issues. This patch resolves the problem by modifying hci_conn_unlink to release only conn's child links when necessary, but never release conn itself.
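The hazard is the classic destructor re-entrancy pattern. Below is a minimal illustrative model with made-up types, not the real hci_conn layout, showing the rule the patch restores: the unlink helper may delete child objects, but must never delete the object it was handed, because its caller frees that one itself:

#include <stdlib.h>

struct conn {
	struct conn *parent;	/* set on the child (SCO-like) side */
	struct conn *child;	/* set on the parent (ACL-like) side */
};

static void conn_del(struct conn *c);

static void conn_unlink(struct conn *c)
{
	if (!c->parent) {
		/* Parent side: children are distinct objects,
		 * so deleting them here is safe. */
		if (c->child) {
			c->child->parent = NULL;
			conn_del(c->child);
			c->child = NULL;
		}
		return;
	}
	/* Child side: detach only. conn_del(c) is our caller and
	 * frees c right after we return; calling conn_del(c) here
	 * would be exactly the double free being fixed. */
	c->parent->child = NULL;
	c->parent = NULL;
}

static void conn_del(struct conn *c)
{
	conn_unlink(c);
	free(c);
}

int main(void)
{
	struct conn *acl = calloc(1, sizeof(*acl));
	struct conn *sco = calloc(1, sizeof(*sco));

	sco->parent = acl;
	acl->child = sco;
	conn_del(acl);	/* frees sco once, then acl once */
	return 0;
}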
Reported-by: syzbot+690b90b14f14f43f4688@syzkaller.appspotmail.com Closes: https://lore.kernel.org/linux-bluetooth/000000000000484a8205faafe216@google.com/ Fixes: 06149746e720 ("Bluetooth: hci_conn: Add support for linking multiple hcon") Signed-off-by: Ruihan Li Signed-off-by: Luiz Augusto von Dentz Reported-by: syzbot+690b90b14f14f43f4688@syzkaller.appspotmail.com Reported-by: Luiz Augusto von Dentz Reported-by: syzbot+8bb72f86fc823817bc5d@syzkaller.appspotmail.com --- net/bluetooth/hci_conn.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 640b951bf40a..70e1655a9df6 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -1083,8 +1083,18 @@ static void hci_conn_unlink(struct hci_conn *conn) if (!conn->parent) { struct hci_link *link, *t; - list_for_each_entry_safe(link, t, &conn->link_list, list) - hci_conn_unlink(link->conn); + list_for_each_entry_safe(link, t, &conn->link_list, list) { + struct hci_conn *child = link->conn; + + hci_conn_unlink(child); + + /* Due to race, SCO connection might be not established + * yet at this point. Delete it now, otherwise it is + * possible for it to be stuck and can't be deleted. + */ + if (child->handle == HCI_CONN_HANDLE_UNSET) + hci_conn_del(child); + } return; } @@ -1100,13 +1110,6 @@ static void hci_conn_unlink(struct hci_conn *conn) kfree(conn->link); conn->link = NULL; - - /* Due to race, SCO connection might be not established - * yet at this point. Delete it now, otherwise it is - * possible for it to be stuck and can't be deleted. - */ - if (conn->handle == HCI_CONN_HANDLE_UNSET) - hci_conn_del(conn); } int hci_conn_del(struct hci_conn *conn) From 2910431ab0e500dfc5df12299bb15eef0f30b43e Mon Sep 17 00:00:00 2001 From: Ruihan Li Date: Wed, 3 May 2023 21:39:35 +0800 Subject: [PATCH 21/79] Bluetooth: Refcnt drop must be placed last in hci_conn_unlink If hci_conn_put(conn->parent) reduces conn->parent's reference count to zero, it can immediately deallocate conn->parent. At the same time, conn->link->list has its head in conn->parent, causing use-after-free problems in the latter list_del_rcu(&conn->link->list). This problem can be easily solved by reordering the two operations, i.e., first performing the list removal with list_del_rcu and then decreasing the refcnt with hci_conn_put. 
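The ordering rule here is the usual refcount discipline, sketched with hypothetical helpers rather than the real hci structures: finish every access to parent-owned memory (the list head) before dropping the reference that keeps the parent alive:

#include <stdlib.h>

struct parent {
	int refcnt;
	struct link *links;	/* list head lives inside the parent */
};

struct link {
	struct link *next;
	struct parent *parent;
};

static void parent_put(struct parent *p)
{
	if (--p->refcnt == 0)
		free(p);	/* p->links is dangling after this */
}

static void link_unlink(struct link *l)
{
	struct parent *p = l->parent;

	/* 1. Remove l from the parent-owned list first
	 *    (stand-in for list_del_rcu(&conn->link->list)). */
	p->links = l->next;
	l->parent = NULL;
	/* 2. Only now drop the reference. Doing parent_put(p) first
	 *    could free p and turn step 1 into the use-after-free
	 *    described above. */
	parent_put(p);
}

int main(void)
{
	struct parent *p = calloc(1, sizeof(*p));
	struct link *l = calloc(1, sizeof(*l));

	p->refcnt = 1;		/* the link holds the only reference */
	p->links = l;
	l->parent = p;
	link_unlink(l);		/* safe: list removal precedes the put */
	free(l);
	return 0;
}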
Reported-by: Luiz Augusto von Dentz Closes: https://lore.kernel.org/linux-bluetooth/CABBYNZ+1kce8_RJrLNOXd_8=Mdpb=2bx4Nto-hFORk=qiOkoCg@mail.gmail.com/ Fixes: 06149746e720 ("Bluetooth: hci_conn: Add support for linking multiple hcon") Signed-off-by: Ruihan Li Signed-off-by: Luiz Augusto von Dentz --- net/bluetooth/hci_conn.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 70e1655a9df6..44d0643fc681 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -1102,12 +1102,12 @@ static void hci_conn_unlink(struct hci_conn *conn) if (!conn->link) return; - hci_conn_put(conn->parent); - conn->parent = NULL; - list_del_rcu(&conn->link->list); synchronize_rcu(); + hci_conn_put(conn->parent); + conn->parent = NULL; + kfree(conn->link); conn->link = NULL; } From a2ac591cb4d83e1f2d4b4adb3c14b2c79764650a Mon Sep 17 00:00:00 2001 From: Ruihan Li Date: Wed, 3 May 2023 21:39:36 +0800 Subject: [PATCH 22/79] Bluetooth: Fix UAF in hci_conn_hash_flush again Commit 06149746e720 ("Bluetooth: hci_conn: Add support for linking multiple hcon") reintroduced a previously fixed bug [1] ("KASAN: slab-use-after-free Read in hci_conn_hash_flush"). This bug was originally fixed by commit 5dc7d23e167e ("Bluetooth: hci_conn: Fix possible UAF"). The hci_conn_unlink function was added to avoid invalidating the link traversal caused by successive hci_conn_del operations releasing extra connections. However, currently hci_conn_unlink itself also releases extra connections, resulted in the reintroduced bug. This patch follows a more robust solution for cleaning up all connections, by repeatedly removing the first connection until there are none left. This approach does not rely on the inner workings of hci_conn_del and ensures proper cleanup of all connections. Meanwhile, we need to make sure that hci_conn_del never fails. Indeed it doesn't, as it now always returns zero. To make this a bit clearer, this patch also changes its return type to void. Reported-by: syzbot+8bb72f86fc823817bc5d@syzkaller.appspotmail.com Closes: https://lore.kernel.org/linux-bluetooth/000000000000aa920505f60d25ad@google.com/ Fixes: 06149746e720 ("Bluetooth: hci_conn: Add support for linking multiple hcon") Signed-off-by: Ruihan Li Co-developed-by: Luiz Augusto von Dentz Signed-off-by: Luiz Augusto von Dentz --- include/net/bluetooth/hci_core.h | 2 +- net/bluetooth/hci_conn.c | 33 +++++++++++++++++++++----------- 2 files changed, 23 insertions(+), 12 deletions(-) diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index a6c8aee2f256..8baf34639939 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1327,7 +1327,7 @@ int hci_le_create_cis(struct hci_conn *conn); struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, u8 role); -int hci_conn_del(struct hci_conn *conn); +void hci_conn_del(struct hci_conn *conn); void hci_conn_hash_flush(struct hci_dev *hdev); void hci_conn_check_pending(struct hci_dev *hdev); diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 44d0643fc681..ce588359b290 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -1088,6 +1088,14 @@ static void hci_conn_unlink(struct hci_conn *conn) hci_conn_unlink(child); + /* If hdev is down it means + * hci_dev_close_sync/hci_conn_hash_flush is in progress + * and links don't need to be cleanup as all connections + * would be cleanup. 
+ */ + if (!test_bit(HCI_UP, &hdev->flags)) + continue; + /* Due to race, SCO connection might be not established * yet at this point. Delete it now, otherwise it is * possible for it to be stuck and can't be deleted. @@ -1112,7 +1120,7 @@ static void hci_conn_unlink(struct hci_conn *conn) conn->link = NULL; } -int hci_conn_del(struct hci_conn *conn) +void hci_conn_del(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; @@ -1163,8 +1171,6 @@ int hci_conn_del(struct hci_conn *conn) * rest of hci_conn_del. */ hci_conn_cleanup(conn); - - return 0; } struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type) @@ -2465,22 +2471,27 @@ timer: /* Drop all connection on the device */ void hci_conn_hash_flush(struct hci_dev *hdev) { - struct hci_conn_hash *h = &hdev->conn_hash; - struct hci_conn *c, *n; + struct list_head *head = &hdev->conn_hash.list; + struct hci_conn *conn; BT_DBG("hdev %s", hdev->name); - list_for_each_entry_safe(c, n, &h->list, list) { - c->state = BT_CLOSED; - - hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM); + /* We should not traverse the list here, because hci_conn_del + * can remove extra links, which may cause the list traversal + * to hit items that have already been released. + */ + while ((conn = list_first_entry_or_null(head, + struct hci_conn, + list)) != NULL) { + conn->state = BT_CLOSED; + hci_disconn_cfm(conn, HCI_ERROR_LOCAL_HOST_TERM); /* Unlink before deleting otherwise it is possible that * hci_conn_del removes the link which may cause the list to * contain items already freed. */ - hci_conn_unlink(c); - hci_conn_del(c); + hci_conn_unlink(conn); + hci_conn_del(conn); } } From a2904d2825536aa896a149a9174d11b0958e7095 Mon Sep 17 00:00:00 2001 From: Ruihan Li Date: Wed, 3 May 2023 21:39:37 +0800 Subject: [PATCH 23/79] Bluetooth: Unlink CISes when LE disconnects in hci_conn_del Currently, hci_conn_del calls hci_conn_unlink for BR/EDR, (e)SCO, and CIS connections, i.e., everything except LE connections. However, if (e)SCO connections are unlinked when BR/EDR disconnects, CIS connections should also be unlinked when LE disconnects. In terms of disconnection behavior, CIS and (e)SCO connections are not too different. One peculiarity of CIS is that when CIS connections are disconnected, the CIS handle isn't deleted, as per [BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E] 7.1.6 Disconnect command: All SCO, eSCO, and CIS connections on a physical link should be disconnected before the ACL connection on the same physical connection is disconnected. If it does not, they will be implicitly disconnected as part of the ACL disconnection. ... Note: As specified in Section 7.7.5, on the Central, the handle for a CIS remains valid even after disconnection and, therefore, the Host can recreate a disconnected CIS at a later point in time using the same connection handle. Since hci_conn_link invokes both hci_conn_get and hci_conn_hold, hci_conn_unlink should perform both hci_conn_put and hci_conn_drop as well. However, currently it performs only hci_conn_put. This patch makes hci_conn_unlink call hci_conn_drop as well, which simplifies the logic in hci_conn_del a bit and may benefit future users of hci_conn_unlink. 
But it is noted that this change additionally implies that hci_conn_unlink can queue disc_work on conn itself, with the following call stack: hci_conn_unlink(conn) [conn->parent == NULL] -> hci_conn_unlink(child) [child->parent == conn] -> hci_conn_drop(child->parent) -> queue_delayed_work(&conn->disc_work) Queued disc_work after hci_conn_del can be spurious, so during the process of hci_conn_del, it is necessary to make the call to cancel_delayed_work(&conn->disc_work) after invoking hci_conn_unlink. Signed-off-by: Ruihan Li Co-developed-by: Luiz Augusto von Dentz Signed-off-by: Luiz Augusto von Dentz --- net/bluetooth/hci_conn.c | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index ce588359b290..f75ef12f18f7 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -1100,7 +1100,9 @@ static void hci_conn_unlink(struct hci_conn *conn) * yet at this point. Delete it now, otherwise it is * possible for it to be stuck and can't be deleted. */ - if (child->handle == HCI_CONN_HANDLE_UNSET) + if ((child->type == SCO_LINK || + child->type == ESCO_LINK) && + child->handle == HCI_CONN_HANDLE_UNSET) hci_conn_del(child); } @@ -1113,6 +1115,7 @@ static void hci_conn_unlink(struct hci_conn *conn) list_del_rcu(&conn->link->list); synchronize_rcu(); + hci_conn_drop(conn->parent); hci_conn_put(conn->parent); conn->parent = NULL; @@ -1126,12 +1129,13 @@ void hci_conn_del(struct hci_conn *conn) BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle); + hci_conn_unlink(conn); + cancel_delayed_work_sync(&conn->disc_work); cancel_delayed_work_sync(&conn->auto_accept_work); cancel_delayed_work_sync(&conn->idle_work); if (conn->type == ACL_LINK) { - hci_conn_unlink(conn); /* Unacked frames */ hdev->acl_cnt += conn->sent; } else if (conn->type == LE_LINK) { @@ -1142,13 +1146,6 @@ void hci_conn_del(struct hci_conn *conn) else hdev->acl_cnt += conn->sent; } else { - struct hci_conn *acl = conn->parent; - - if (acl) { - hci_conn_unlink(conn); - hci_conn_drop(acl); - } - /* Unacked ISO frames */ if (conn->type == ISO_LINK) { if (hdev->iso_pkts) @@ -2485,12 +2482,6 @@ void hci_conn_hash_flush(struct hci_dev *hdev) list)) != NULL) { conn->state = BT_CLOSED; hci_disconn_cfm(conn, HCI_ERROR_LOCAL_HOST_TERM); - - /* Unlink before deleting otherwise it is possible that - * hci_conn_del removes the link which may cause the list to - * contain items already freed. 
- */ - hci_conn_unlink(conn); hci_conn_del(conn); } } From 6ce5169e05aa5360a49a574cc1490ceea6b651a6 Mon Sep 17 00:00:00 2001 From: Neeraj Sanjay Kale Date: Thu, 18 May 2023 22:13:47 +0530 Subject: [PATCH 24/79] Bluetooth: btnxpuart: Fix compiler warnings This fixes the following compiler warning reported by the kernel test robot: drivers/bluetooth/btnxpuart.c:1332:34: warning: unused variable 'nxpuart_of_match_table' [-Wunused-const-variable] Signed-off-by: Neeraj Sanjay Kale Reported-by: kernel test robot Link: https://lore.kernel.org/oe-kbuild-all/202305161345.eClvTYQ9-lkp@intel.com/ Signed-off-by: Luiz Augusto von Dentz --- drivers/bluetooth/btnxpuart.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c index 3a34d7c1475b..52ef44688d38 100644 --- a/drivers/bluetooth/btnxpuart.c +++ b/drivers/bluetooth/btnxpuart.c @@ -1319,17 +1319,17 @@ static void nxp_serdev_remove(struct serdev_device *serdev) hci_free_dev(hdev); } -static struct btnxpuart_data w8987_data = { +static struct btnxpuart_data w8987_data __maybe_unused = { .helper_fw_name = NULL, .fw_name = FIRMWARE_W8987, }; -static struct btnxpuart_data w8997_data = { +static struct btnxpuart_data w8997_data __maybe_unused = { .helper_fw_name = FIRMWARE_HELPER, .fw_name = FIRMWARE_W8997, }; -static const struct of_device_id nxpuart_of_match_table[] = { +static const struct of_device_id nxpuart_of_match_table[] __maybe_unused = { { .compatible = "nxp,88w8987-bt", .data = &w8987_data }, { .compatible = "nxp,88w8997-bt", .data = &w8997_data }, { } From ae9b15fbe63447bc1d3bba3769f409d17ca6fdf6 Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Wed, 17 May 2023 14:30:10 +0000 Subject: [PATCH 25/79] net: fix stack overflow when LRO is disabled for virtual interfaces When a virtual interface's features are updated, it synchronizes the updated features to its own lower interfaces. This propagation logic should work iteratively, not recursively, but it unexpectedly becomes recursive due to the netdev notification mechanism. The problem occurs when LRO is disabled for the team and bonding interface types. team0 | +------+------+-----+-----+ | | | | | team1 team2 team3 ... team200 If team0's LRO feature is updated, it generates the NETDEV_FEAT_CHANGE event for its own lower interfaces (team1 ~ team200). This is done by netdev_sync_lower_features(), so the NETDEV_FEAT_CHANGE notification logic of each lower interface works iteratively. But the generated NETDEV_FEAT_CHANGE event is also sent to the upper interface. The upper interface (team0) then generates the NETDEV_FEAT_CHANGE event for its own lower interfaces again, and the lower and upper interfaces keep receiving and re-generating this event, so a stack overflow occurs. It is not an infinite loop, because netdev_sync_lower_features() updates features before generating the NETDEV_FEAT_CHANGE event, and already-synchronized lower interfaces skip the notification logic. The problem is simply that the intended iteration unexpectedly turns into recursion due to the notification mechanism. Reproducer: ip link add team0 type team ethtool -K team0 lro on for i in {1..200} do ip link add team$i master team0 type team ethtool -K team$i lro on done ethtool -K team0 lro off In order to fix it, the notifier_ctx member of bonding/team is introduced.
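For illustration, a minimal userspace sketch of that notifier_ctx guard; the names and structure are illustrative, not the kernel API (the real changes are in the diffs below):

    #include <stdbool.h>
    #include <stdio.h>

    struct dev_like {
        bool notifier_ctx;
    };

    static void compute_features(struct dev_like *d);

    /* The feature-change handler may be re-invoked while
     * compute_features() propagates the change to lower devices;
     * the flag turns the would-be recursion into a no-op. */
    static void feat_change_event(struct dev_like *d)
    {
        if (d->notifier_ctx)
            return;
        d->notifier_ctx = true;
        compute_features(d);
        d->notifier_ctx = false;
    }

    static void compute_features(struct dev_like *d)
    {
        /* syncing lower features re-raises NETDEV_FEAT_CHANGE here */
        feat_change_event(d);
    }

    int main(void)
    {
        struct dev_like d = { .notifier_ctx = false };

        feat_change_event(&d); /* terminates instead of overflowing the stack */
        printf("done\n");
        return 0;
    }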
Reported-by: syzbot+60748c96cf5c6df8e581@syzkaller.appspotmail.com Fixes: fd867d51f889 ("net/core: generic support for disabling netdev features down stack") Signed-off-by: Taehee Yoo Reviewed-by: Eric Dumazet Reviewed-by: Nikolay Aleksandrov Link: https://lore.kernel.org/r/20230517143010.3596250-1-ap420073@gmail.com Signed-off-by: Jakub Kicinski --- drivers/net/bonding/bond_main.c | 8 +++++++- drivers/net/team/team.c | 7 ++++++- include/linux/if_team.h | 1 + include/net/bonding.h | 1 + 4 files changed, 15 insertions(+), 2 deletions(-) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 3fed888629f7..edbaa1444f8e 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3947,7 +3947,11 @@ static int bond_slave_netdev_event(unsigned long event, unblock_netpoll_tx(); break; case NETDEV_FEAT_CHANGE: - bond_compute_features(bond); + if (!bond->notifier_ctx) { + bond->notifier_ctx = true; + bond_compute_features(bond); + bond->notifier_ctx = false; + } break; case NETDEV_RESEND_IGMP: /* Propagate to master device */ @@ -6342,6 +6346,8 @@ static int bond_init(struct net_device *bond_dev) if (!bond->wq) return -ENOMEM; + bond->notifier_ctx = false; + spin_lock_init(&bond->stats_lock); netdev_lockdep_set_classes(bond_dev); diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index d10606f257c4..555b0b1e9a78 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1629,6 +1629,7 @@ static int team_init(struct net_device *dev) team->dev = dev; team_set_no_mode(team); + team->notifier_ctx = false; team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats); if (!team->pcpu_stats) @@ -3022,7 +3023,11 @@ static int team_device_event(struct notifier_block *unused, team_del_slave(port->team->dev, dev); break; case NETDEV_FEAT_CHANGE: - team_compute_features(port->team); + if (!port->team->notifier_ctx) { + port->team->notifier_ctx = true; + team_compute_features(port->team); + port->team->notifier_ctx = false; + } break; case NETDEV_PRECHANGEMTU: /* Forbid to change mtu of underlaying device */ diff --git a/include/linux/if_team.h b/include/linux/if_team.h index fc985e5c739d..8de6b6e67829 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -208,6 +208,7 @@ struct team { bool queue_override_enabled; struct list_head *qom_lists; /* array of queue override mapping lists */ bool port_mtu_change_allowed; + bool notifier_ctx; struct { unsigned int count; unsigned int interval; /* in ms */ diff --git a/include/net/bonding.h b/include/net/bonding.h index 0efef2a952b7..59955ac33157 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -221,6 +221,7 @@ struct bonding { struct bond_up_slave __rcu *usable_slaves; struct bond_up_slave __rcu *all_slaves; bool force_primary; + bool notifier_ctx; s32 slave_cnt; /* never change this value outside the attach/detach wrappers */ int (*recv_probe)(const struct sk_buff *, struct bonding *, struct slave *); From 6ca328e985cd995dfd1d5de44046e6074f853fbb Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 18 May 2023 16:03:00 -0400 Subject: [PATCH 26/79] sctp: fix an issue that plpmtu can never go to complete state When doing plpmtu probe, the probe size is growing every time when it receives the ACK during the Search state until the probe fails. When the failure occurs, pl.probe_high is set and it goes to the Complete state. 
However, if the link pmtu is huge, like 65535 in loopback_dev, the probe eventually keeps using SCTP_MAX_PLPMTU as the probe size and never fails. Because of that, pl.probe_high cannot be set, and the plpmtu probe can never go to the Complete state. Fix it by setting pl.probe_high to SCTP_MAX_PLPMTU when the probe size grows to SCTP_MAX_PLPMTU in sctp_transport_pl_recv(). Also, do not allow the probe size to exceed SCTP_MAX_PLPMTU in the Complete state. Fixes: b87641aff9e7 ("sctp: do state transition when a probe succeeds on HB ACK recv path") Signed-off-by: Xin Long Signed-off-by: David S. Miller --- net/sctp/transport.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 2f66a2006517..2abe45af98e7 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -324,9 +324,12 @@ bool sctp_transport_pl_recv(struct sctp_transport *t) t->pl.probe_size += SCTP_PL_BIG_STEP; } else if (t->pl.state == SCTP_PL_SEARCH) { if (!t->pl.probe_high) { - t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_BIG_STEP, - SCTP_MAX_PLPMTU); - return false; + if (t->pl.probe_size < SCTP_MAX_PLPMTU) { + t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_BIG_STEP, + SCTP_MAX_PLPMTU); + return false; + } + t->pl.probe_high = SCTP_MAX_PLPMTU; } t->pl.probe_size += SCTP_PL_MIN_STEP; if (t->pl.probe_size >= t->pl.probe_high) { @@ -341,7 +344,7 @@ bool sctp_transport_pl_recv(struct sctp_transport *t) } else if (t->pl.state == SCTP_PL_COMPLETE) { /* Raise probe_size again after 30 * interval in Search Complete */ t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */ - t->pl.probe_size += SCTP_PL_MIN_STEP; + t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_MIN_STEP, SCTP_MAX_PLPMTU); } return t->pl.state == SCTP_PL_COMPLETE; From b34ffb0c6d23583830f9327864b9c1f486003305 Mon Sep 17 00:00:00 2001 From: Anton Protopopov Date: Mon, 22 May 2023 15:45:58 +0000 Subject: [PATCH 27/79] bpf: fix a memory leak in the LRU and LRU_PERCPU hash maps The LRU and LRU_PERCPU maps allocate a new element on update before locking the target hash table bucket. Right after that the maps try to lock the bucket. If this fails, the maps return -EBUSY to the caller without releasing the allocated element. This makes the element untracked: it doesn't belong to any of the free lists, and it doesn't belong to the hash table, so it can't be re-used; this eventually leads to a permanent -ENOMEM on LRU map updates, which is unexpected. Fix this by returning the element to the local free list if bucket locking fails.
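For illustration, a minimal userspace sketch of the leak and the fix, with a pthread mutex standing in for the kernel bucket lock (names are illustrative):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct elem { struct elem *next; };

    static struct elem *free_list;

    static void push_free(struct elem *e)
    {
        e->next = free_list;
        free_list = e;
    }

    /* The element is allocated before the bucket lock is taken; if
     * locking fails, the element must go back to a free list or it
     * becomes untracked and is leaked. */
    static int map_update(pthread_mutex_t *bucket_lock)
    {
        struct elem *e = calloc(1, sizeof(*e));

        if (!e)
            return -ENOMEM;
        if (pthread_mutex_trylock(bucket_lock)) {
            push_free(e); /* the fix: don't drop the pre-allocated element */
            return -EBUSY;
        }
        /* ... insert e into the bucket ... */
        pthread_mutex_unlock(bucket_lock);
        return 0;
    }

    int main(void)
    {
        pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

        printf("%d\n", map_update(&lock));
        return 0;
    }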
Fixes: 20b6cc34ea74 ("bpf: Avoid hashtab deadlock with map_locked") Signed-off-by: Anton Protopopov Link: https://lore.kernel.org/r/20230522154558.2166815-1-aspsk@isovalent.com Signed-off-by: Martin KaFai Lau --- kernel/bpf/hashtab.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 00c253b84bf5..9901efee4339 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -1215,7 +1215,7 @@ static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value ret = htab_lock_bucket(htab, b, hash, &flags); if (ret) - return ret; + goto err_lock_bucket; l_old = lookup_elem_raw(head, hash, key, key_size); @@ -1236,6 +1236,7 @@ static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value err: htab_unlock_bucket(htab, b, hash, flags); +err_lock_bucket: if (ret) htab_lru_push_free(htab, l_new); else if (l_old) @@ -1338,7 +1339,7 @@ static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key, ret = htab_lock_bucket(htab, b, hash, &flags); if (ret) - return ret; + goto err_lock_bucket; l_old = lookup_elem_raw(head, hash, key, key_size); @@ -1361,6 +1362,7 @@ static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key, ret = 0; err: htab_unlock_bucket(htab, b, hash, flags); +err_lock_bucket: if (l_new) bpf_lru_push_free(&htab->lru, &l_new->lru_node); return ret; From 5b17a4971d3b2a073f4078dd65331efbe35baa2d Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sat, 20 May 2023 10:30:17 +0200 Subject: [PATCH 28/79] forcedeth: Fix an error handling path in nv_probe() If an error occurs after calling nv_mgmt_acquire_sema(), it should be undone with a corresponding nv_mgmt_release_sema() call. Add it in the error handling path of the probe as already done in the remove function. Fixes: cac1c52c3621 ("forcedeth: mgmt unit interface") Signed-off-by: Christophe JAILLET Acked-by: Zhu Yanjun Link: https://lore.kernel.org/r/355e9a7d351b32ad897251b6f81b5886fcdc6766.1684571393.git.christophe.jaillet@wanadoo.fr Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/nvidia/forcedeth.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 0605d1ee490d..7a549b834e97 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -6138,6 +6138,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) return 0; out_error: + nv_mgmt_release_sema(dev); if (phystate_orig) writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl); out_freering: From 640bf95b2c7c2981fb471acdafbd3e0458f8390d Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sat, 20 May 2023 11:48:55 +0200 Subject: [PATCH 29/79] 3c589_cs: Fix an error handling path in tc589_probe() Should tc589_config() fail, some resources need to be released as already done in the remove function.
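Both this fix and the forcedeth one above follow the usual unwind-in-reverse-order idiom for probe error paths; a minimal sketch with hypothetical resource names:

    #include <stdio.h>

    /* Stubs standing in for real acquire/configure steps. */
    static int acquire_a(void)  { return 0; }
    static int acquire_b(void)  { return 0; }
    static int configure(void)  { return -1; /* force the error path */ }
    static void release_a(void) { puts("release_a"); }
    static void release_b(void) { puts("release_b"); }

    static int probe(void)
    {
        int ret;

        ret = acquire_a();
        if (ret)
            return ret;

        ret = acquire_b();
        if (ret)
            goto err_release_a;

        ret = configure();
        if (ret)
            goto err_release_b;

        return 0;

    err_release_b:          /* undo in the reverse order of acquisition */
        release_b();
    err_release_a:
        release_a();
        return ret;
    }

    int main(void)
    {
        return probe() ? 1 : 0;
    }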
Fixes: 15b99ac17295 ("[PATCH] pcmcia: add return value to _config() functions") Signed-off-by: Christophe JAILLET Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/d8593ae867b24c79063646e36f9b18b0790107cb.1684575975.git.christophe.jaillet@wanadoo.fr Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/3com/3c589_cs.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c index 82f94b1635bf..5267e9dcd87e 100644 --- a/drivers/net/ethernet/3com/3c589_cs.c +++ b/drivers/net/ethernet/3com/3c589_cs.c @@ -195,6 +195,7 @@ static int tc589_probe(struct pcmcia_device *link) { struct el3_private *lp; struct net_device *dev; + int ret; dev_dbg(&link->dev, "3c589_attach()\n"); @@ -218,7 +219,15 @@ static int tc589_probe(struct pcmcia_device *link) dev->ethtool_ops = &netdev_ethtool_ops; - return tc589_config(link); + ret = tc589_config(link); + if (ret) + goto err_free_netdev; + + return 0; + +err_free_netdev: + free_netdev(dev); + return ret; } static void tc589_detach(struct pcmcia_device *link) From b21c7ba6d9a5532add3827a3b49f49cbc0cb9779 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 19 May 2023 13:12:50 -0400 Subject: [PATCH 30/79] net/handshake: Squelch allocation warning during Kunit test The "handshake_req_alloc excessive privsize" kunit test is intended to check what happens when the maximum privsize is exceeded. The WARN_ON_ONCE_GFP at mm/page_alloc.c:4744 can be disabled safely for this test. Reported-by: Linux Kernel Functional Testing Fixes: 88232ec1ec5e ("net/handshake: Add Kunit tests for the handshake consumer API") Signed-off-by: Chuck Lever Link: https://lore.kernel.org/r/168451636052.47152.9600443326570457947.stgit@oracle-102.nfsv4bat.org Signed-off-by: Jakub Kicinski --- net/handshake/handshake-test.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/handshake/handshake-test.c b/net/handshake/handshake-test.c index e6adc5dec11a..6193e46ee6d9 100644 --- a/net/handshake/handshake-test.c +++ b/net/handshake/handshake-test.c @@ -102,7 +102,7 @@ struct handshake_req_alloc_test_param handshake_req_alloc_params[] = { { .desc = "handshake_req_alloc excessive privsize", .proto = &handshake_req_alloc_proto_6, - .gfp = GFP_KERNEL, + .gfp = GFP_KERNEL | __GFP_NOWARN, .expect_success = false, }, { From 18c40a1cc1d990c51381ef48cd93fdb31d5cd903 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 19 May 2023 13:08:24 -0400 Subject: [PATCH 31/79] net/handshake: Fix sock->file allocation sock->file = sock_alloc_file(sock, O_NONBLOCK, NULL); ^^^^ ^^^^ sock_alloc_file() calls sock_release() on error but the left hand side of the assignment dereferences "sock". This isn't the bug and I didn't report this earlier because there is an assert that it doesn't fail. net/handshake/handshake-test.c:221 handshake_req_submit_test4() error: dereferencing freed memory 'sock' net/handshake/handshake-test.c:233 handshake_req_submit_test4() warn: 'req' was already freed. net/handshake/handshake-test.c:254 handshake_req_submit_test5() error: dereferencing freed memory 'sock' net/handshake/handshake-test.c:290 handshake_req_submit_test6() error: dereferencing freed memory 'sock' net/handshake/handshake-test.c:321 handshake_req_cancel_test1() error: dereferencing freed memory 'sock' net/handshake/handshake-test.c:355 handshake_req_cancel_test2() error: dereferencing freed memory 'sock' net/handshake/handshake-test.c:367 handshake_req_cancel_test2() warn: 'req' was already freed.
net/handshake/handshake-test.c:395 handshake_req_cancel_test3() error: dereferencing freed memory 'sock' net/handshake/handshake-test.c:407 handshake_req_cancel_test3() warn: 'req' was already freed. net/handshake/handshake-test.c:451 handshake_req_destroy_test1() error: dereferencing freed memory 'sock' net/handshake/handshake-test.c:463 handshake_req_destroy_test1() warn: 'req' was already freed. Reported-by: Dan Carpenter Fixes: 88232ec1ec5e ("net/handshake: Add Kunit tests for the handshake consumer API") Signed-off-by: Chuck Lever Link: https://lore.kernel.org/r/168451609436.45209.15407022385441542980.stgit@oracle-102.nfsv4bat.org Signed-off-by: Jakub Kicinski --- net/handshake/handshake-test.c | 42 ++++++++++++++++++++++------------ 1 file changed, 28 insertions(+), 14 deletions(-) diff --git a/net/handshake/handshake-test.c b/net/handshake/handshake-test.c index 6193e46ee6d9..6d37bab35c8f 100644 --- a/net/handshake/handshake-test.c +++ b/net/handshake/handshake-test.c @@ -209,6 +209,7 @@ static void handshake_req_submit_test4(struct kunit *test) { struct handshake_req *req, *result; struct socket *sock; + struct file *filp; int err; /* Arrange */ @@ -218,9 +219,10 @@ static void handshake_req_submit_test4(struct kunit *test) err = __sock_create(&init_net, PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock, 1); KUNIT_ASSERT_EQ(test, err, 0); - sock->file = sock_alloc_file(sock, O_NONBLOCK, NULL); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sock->file); + filp = sock_alloc_file(sock, O_NONBLOCK, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp); KUNIT_ASSERT_NOT_NULL(test, sock->sk); + sock->file = filp; err = handshake_req_submit(sock, req, GFP_KERNEL); KUNIT_ASSERT_EQ(test, err, 0); @@ -241,6 +243,7 @@ static void handshake_req_submit_test5(struct kunit *test) struct handshake_req *req; struct handshake_net *hn; struct socket *sock; + struct file *filp; struct net *net; int saved, err; @@ -251,9 +254,10 @@ static void handshake_req_submit_test5(struct kunit *test) err = __sock_create(&init_net, PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock, 1); KUNIT_ASSERT_EQ(test, err, 0); - sock->file = sock_alloc_file(sock, O_NONBLOCK, NULL); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sock->file); + filp = sock_alloc_file(sock, O_NONBLOCK, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp); KUNIT_ASSERT_NOT_NULL(test, sock->sk); + sock->file = filp; net = sock_net(sock->sk); hn = handshake_pernet(net); @@ -276,6 +280,7 @@ static void handshake_req_submit_test6(struct kunit *test) { struct handshake_req *req1, *req2; struct socket *sock; + struct file *filp; int err; /* Arrange */ @@ -287,9 +292,10 @@ static void handshake_req_submit_test6(struct kunit *test) err = __sock_create(&init_net, PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock, 1); KUNIT_ASSERT_EQ(test, err, 0); - sock->file = sock_alloc_file(sock, O_NONBLOCK, NULL); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sock->file); + filp = sock_alloc_file(sock, O_NONBLOCK, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp); KUNIT_ASSERT_NOT_NULL(test, sock->sk); + sock->file = filp; /* Act */ err = handshake_req_submit(sock, req1, GFP_KERNEL); @@ -307,6 +313,7 @@ static void handshake_req_cancel_test1(struct kunit *test) { struct handshake_req *req; struct socket *sock; + struct file *filp; bool result; int err; @@ -318,8 +325,9 @@ static void handshake_req_cancel_test1(struct kunit *test) &sock, 1); KUNIT_ASSERT_EQ(test, err, 0); - sock->file = sock_alloc_file(sock, O_NONBLOCK, NULL); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sock->file); + filp = sock_alloc_file(sock, O_NONBLOCK, NULL); + 
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp); + sock->file = filp; err = handshake_req_submit(sock, req, GFP_KERNEL); KUNIT_ASSERT_EQ(test, err, 0); @@ -340,6 +348,7 @@ static void handshake_req_cancel_test2(struct kunit *test) struct handshake_req *req, *next; struct handshake_net *hn; struct socket *sock; + struct file *filp; struct net *net; bool result; int err; @@ -352,8 +361,9 @@ static void handshake_req_cancel_test2(struct kunit *test) &sock, 1); KUNIT_ASSERT_EQ(test, err, 0); - sock->file = sock_alloc_file(sock, O_NONBLOCK, NULL); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sock->file); + filp = sock_alloc_file(sock, O_NONBLOCK, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp); + sock->file = filp; err = handshake_req_submit(sock, req, GFP_KERNEL); KUNIT_ASSERT_EQ(test, err, 0); @@ -380,6 +390,7 @@ static void handshake_req_cancel_test3(struct kunit *test) struct handshake_req *req, *next; struct handshake_net *hn; struct socket *sock; + struct file *filp; struct net *net; bool result; int err; @@ -392,8 +403,9 @@ static void handshake_req_cancel_test3(struct kunit *test) &sock, 1); KUNIT_ASSERT_EQ(test, err, 0); - sock->file = sock_alloc_file(sock, O_NONBLOCK, NULL); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sock->file); + filp = sock_alloc_file(sock, O_NONBLOCK, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp); + sock->file = filp; err = handshake_req_submit(sock, req, GFP_KERNEL); KUNIT_ASSERT_EQ(test, err, 0); @@ -436,6 +448,7 @@ static void handshake_req_destroy_test1(struct kunit *test) { struct handshake_req *req; struct socket *sock; + struct file *filp; int err; /* Arrange */ @@ -448,8 +461,9 @@ static void handshake_req_destroy_test1(struct kunit *test) &sock, 1); KUNIT_ASSERT_EQ(test, err, 0); - sock->file = sock_alloc_file(sock, O_NONBLOCK, NULL); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sock->file); + filp = sock_alloc_file(sock, O_NONBLOCK, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp); + sock->file = filp; err = handshake_req_submit(sock, req, GFP_KERNEL); KUNIT_ASSERT_EQ(test, err, 0); From 2a0a935fb64ee8af253b9c6133bb6702fb152ac2 Mon Sep 17 00:00:00 2001 From: Shay Drory Date: Tue, 2 May 2023 11:03:53 +0300 Subject: [PATCH 32/79] net/mlx5: Collect command failures data only for known commands DEVX can issue a general command that is not used by the mlx5 driver. If such a command fails, mlx5 tries to collect the failure data. However, mlx5 doesn't create storage for this command, since mlx5 doesn't use it. This leads to an array-index-out-of-bounds error. Fix it by checking whether the command is known before collecting the failure data.
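For illustration, a minimal userspace sketch of the added guard (the opcode table contents are illustrative):

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for mlx5_command_str(): known opcodes get a name,
     * everything else maps to the same sentinel string. */
    static const char *command_str(unsigned int opcode)
    {
        switch (opcode) {
        case 0x101: return "CREATE_MKEY";
        case 0x102: return "QUERY_MKEY";
        default:    return "unknown command opcode";
        }
    }

    static void cmd_status_log(unsigned int opcode, int err)
    {
        const char *namep = command_str(opcode);

        /* Skip unknown (e.g. DEVX-issued) commands: no stats slot
         * exists for them, so indexing a stats array by opcode would
         * be out of bounds. */
        if (!err || !strcmp(namep, "unknown command opcode"))
            return;

        printf("recording failure for %s\n", namep);
    }

    int main(void)
    {
        cmd_status_log(0x101, -5); /* recorded */
        cmd_status_log(0x999, -5); /* ignored: unknown opcode */
        return 0;
    }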
Fixes: 34f46ae0d4b3 ("net/mlx5: Add command failures data to debugfs") Signed-off-by: Shay Drory Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index d53de39539a8..d532883b42d7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -1920,9 +1920,10 @@ static void mlx5_cmd_err_trace(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status, u32 syndrome, int err) { + const char *namep = mlx5_command_str(opcode); struct mlx5_cmd_stats *stats; - if (!err) + if (!err || !(strcmp(namep, "unknown command opcode"))) return; stats = &dev->cmd.stats[opcode]; From 2be5bd42a5bba1a05daedc86cf0e248210009669 Mon Sep 17 00:00:00 2001 From: Shay Drory Date: Mon, 20 Mar 2023 13:07:53 +0200 Subject: [PATCH 33/79] net/mlx5: Handle pairing of E-switch via uplink un/load APIs When the user switches a device from switchdev mode to legacy mode, mlx5 first unpairs the E-switch and afterwards unloads the uplink vport. On the other hand, when the user removes or reloads a device, mlx5 first unloads the uplink vport and afterwards unpairs the E-switch. The latter causes a bug[1]; hence, handle pairing of the E-switch as part of the uplink un/load APIs. [1] In case VF_LAG is used, every tc fdb flow is duplicated to the peer esw. However, the original esw keeps a pointer to this duplicated flow, not the peer esw. E.g.: if the user creates a tc fdb flow over esw0, the flow is duplicated over esw1 in FW/HW, but in SW, esw0 keeps a pointer to the duplicated flow. During module unload while a peer tc fdb flow is still offloaded, in case the first device to be removed is the peer device (esw1 in the example above), the peer net-dev is destroyed, and so the mlx5e_priv is memset to 0. Afterwards, the peer device tries to unpair itself from the original device (esw0 in the example above). The unpair API invokes the original device to clear the peer flow from its eswitch (esw0), but the peer flow, which is stored on the original eswitch (esw0), tries to use the peer mlx5e_priv, which is memset to 0, resulting in the kernel oops below.
[ 157.964081 ] BUG: unable to handle page fault for address: 000000000002ce60 [ 157.964662 ] #PF: supervisor read access in kernel mode [ 157.965123 ] #PF: error_code(0x0000) - not-present page [ 157.965582 ] PGD 0 P4D 0 [ 157.965866 ] Oops: 0000 [#1] SMP [ 157.967670 ] RIP: 0010:mlx5e_tc_del_fdb_flow+0x48/0x460 [mlx5_core] [ 157.976164 ] Call Trace: [ 157.976437 ] [ 157.976690 ] __mlx5e_tc_del_fdb_peer_flow+0xe6/0x100 [mlx5_core] [ 157.977230 ] mlx5e_tc_clean_fdb_peer_flows+0x67/0x90 [mlx5_core] [ 157.977767 ] mlx5_esw_offloads_unpair+0x2d/0x1e0 [mlx5_core] [ 157.984653 ] mlx5_esw_offloads_devcom_event+0xbf/0x130 [mlx5_core] [ 157.985212 ] mlx5_devcom_send_event+0xa3/0xb0 [mlx5_core] [ 157.985714 ] esw_offloads_disable+0x5a/0x110 [mlx5_core] [ 157.986209 ] mlx5_eswitch_disable_locked+0x152/0x170 [mlx5_core] [ 157.986757 ] mlx5_eswitch_disable+0x51/0x80 [mlx5_core] [ 157.987248 ] mlx5_unload+0x2a/0xb0 [mlx5_core] [ 157.987678 ] mlx5_uninit_one+0x5f/0xd0 [mlx5_core] [ 157.988127 ] remove_one+0x64/0xe0 [mlx5_core] [ 157.988549 ] pci_device_remove+0x31/0xa0 [ 157.988933 ] device_release_driver_internal+0x18f/0x1f0 [ 157.989402 ] driver_detach+0x3f/0x80 [ 157.989754 ] bus_remove_driver+0x70/0xf0 [ 157.990129 ] pci_unregister_driver+0x34/0x90 [ 157.990537 ] mlx5_cleanup+0xc/0x1c [mlx5_core] [ 157.990972 ] __x64_sys_delete_module+0x15a/0x250 [ 157.991398 ] ? exit_to_user_mode_prepare+0xea/0x110 [ 157.991840 ] do_syscall_64+0x3d/0x90 [ 157.992198 ] entry_SYSCALL_64_after_hwframe+0x46/0xb0 Fixes: 04de7dda7394 ("net/mlx5e: Infrastructure for duplicated offloading of TC flows") Fixes: 1418ddd96afd ("net/mlx5e: Duplicate offloaded TC eswitch rules under uplink LAG") Signed-off-by: Shay Drory Reviewed-by: Roi Dayan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 4 +++- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 4 ++++ drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 7 ++----- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 728b82ce4031..65fe40f55d84 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -5301,6 +5301,8 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv) goto err_action_counter; } + mlx5_esw_offloads_devcom_init(esw); + return 0; err_action_counter: @@ -5329,7 +5331,7 @@ void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv) priv = netdev_priv(rpriv->netdev); esw = priv->mdev->priv.eswitch; - mlx5e_tc_clean_fdb_peer_flows(esw); + mlx5_esw_offloads_devcom_cleanup(esw); mlx5e_tc_tun_cleanup(uplink_priv->encap); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 1a042c981713..9f007c5438ee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -369,6 +369,8 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs); void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf); void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw); void mlx5_eswitch_disable(struct mlx5_eswitch *esw); +void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw); +void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw); int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, u16 vport, const u8 *mac); int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, @@ -767,6 +769,8 @@ 
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {} static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; } static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {} static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {} +static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw) {} +static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {} static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; } static inline int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 69215ffb9999..7c34c7cf506f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -2779,7 +2779,7 @@ err_out: return err; } -static void esw_offloads_devcom_init(struct mlx5_eswitch *esw) +void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw) { struct mlx5_devcom *devcom = esw->dev->priv.devcom; @@ -2802,7 +2802,7 @@ static void esw_offloads_devcom_init(struct mlx5_eswitch *esw) ESW_OFFLOADS_DEVCOM_PAIR, esw); } -static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) +void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) { struct mlx5_devcom *devcom = esw->dev->priv.devcom; @@ -3250,8 +3250,6 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) if (err) goto err_vports; - esw_offloads_devcom_init(esw); - return 0; err_vports: @@ -3292,7 +3290,6 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw, void esw_offloads_disable(struct mlx5_eswitch *esw) { - esw_offloads_devcom_cleanup(esw); mlx5_eswitch_disable_pf_vf_vports(esw); esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK); esw_set_passing_vport_metadata(esw, false); From 1e5daf5565b61a96e570865091589afc9156e3d3 Mon Sep 17 00:00:00 2001 From: Erez Shitrit Date: Thu, 9 Mar 2023 16:43:15 +0200 Subject: [PATCH 34/79] net/mlx5: DR, Fix crc32 calculation to work on big-endian (BE) CPUs When calculating the crc for a hash index we use the crc32 function, which calculates a little-endian (LE) value. Then we convert it to network endianness using htonl(), but it's wrong to do the conversion on BE archs since the crc32 value is already LE. The solution is to swap the bytes of the crc result on all types of arch.
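For illustration, a standalone sketch of the unconditional byte swap this fix uses (mirroring the replacement in dr_ste_crc32_calc() in the diff below):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Reverse the bytes of the LE-defined CRC unconditionally; htonl()
     * would be a no-op on big-endian hosts and yield a different hash
     * index there. */
    static uint32_t crc_swab32(uint32_t crc)
    {
        return ((crc >> 24) & 0xff) | ((crc << 8) & 0xff0000) |
               ((crc >> 8) & 0xff00) | ((crc << 24) & 0xff000000);
    }

    int main(void)
    {
        /* Prints 44332211 regardless of host endianness. */
        printf("%08" PRIx32 "\n", crc_swab32(0x11223344));
        return 0;
    }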
Fixes: 40416d8ede65 ("net/mlx5: DR, Replace CRC32 implementation to use kernel lib") Signed-off-by: Erez Shitrit Reviewed-by: Alex Vesker Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c index 9413aaf51251..e94fbb015efa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c @@ -15,7 +15,8 @@ static u32 dr_ste_crc32_calc(const void *input_data, size_t length) { u32 crc = crc32(0, input_data, length); - return (__force u32)htonl(crc); + return (__force u32)((crc >> 24) & 0xff) | ((crc << 8) & 0xff0000) | + ((crc >> 8) & 0xff00) | ((crc << 24) & 0xff000000); } bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps) From c7dd225bc224726c22db08e680bf787f60ebdee3 Mon Sep 17 00:00:00 2001 From: Yevgeny Kliteynik Date: Sun, 2 Apr 2023 17:14:10 +0300 Subject: [PATCH 35/79] net/mlx5: DR, Check force-loopback RC QP capability independently from RoCE SW Steering uses an RC QP for writing STEs to ICM. This writing is done in LB (loopback), and an FL (force-loopback) QP is preferred for performance. FL is available when RoCE is enabled or disabled, based on RoCE caps. This patch adds reading of the FL capability from HCA caps in addition to the existing reading from RoCE caps, thus fixing the case where we didn't have loopback enabled when RoCE was disabled. Fixes: 7304d603a57a ("net/mlx5: DR, Add support for force-loopback QP") Signed-off-by: Itamar Gozlan Signed-off-by: Yevgeny Kliteynik Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c | 4 +++- include/linux/mlx5/mlx5_ifc.h | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c index 3835ba3f4dda..1aa525e509f1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c @@ -117,6 +117,8 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev, caps->gvmi = MLX5_CAP_GEN(mdev, vhca_id); caps->flex_protocols = MLX5_CAP_GEN(mdev, flex_parser_protocols); caps->sw_format_ver = MLX5_CAP_GEN(mdev, steering_format_version); + caps->roce_caps.fl_rc_qp_when_roce_disabled = + MLX5_CAP_GEN(mdev, fl_rc_qp_when_roce_disabled); if (MLX5_CAP_GEN(mdev, roce)) { err = dr_cmd_query_nic_vport_roce_en(mdev, 0, &roce_en); @@ -124,7 +126,7 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev, return err; caps->roce_caps.roce_en = roce_en; - caps->roce_caps.fl_rc_qp_when_roce_disabled = + caps->roce_caps.fl_rc_qp_when_roce_disabled |= MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_disabled); caps->roce_caps.fl_rc_qp_when_roce_enabled = MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_enabled); diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index dc5e2cb302a5..b89778d0d326 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1705,7 +1705,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 rc[0x1]; u8 uar_4k[0x1]; - u8 reserved_at_241[0x9]; + u8 reserved_at_241[0x7]; + u8 fl_rc_qp_when_roce_disabled[0x1]; + u8 regexp_params[0x1]; u8 uar_sz[0x6]; u8 port_selection_cap[0x1]; u8 reserved_at_248[0x1]; From be071cdb167fc3e25fe81922166b3d499d23e8ac Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Mon,
3 Apr 2023 22:26:00 +0200 Subject: [PATCH 36/79] net/mlx5e: Use correct encap attribute during invalidation With the introduction of the post action infrastructure, most users of the encap attribute were modified to obtain the correct attribute by calling the mlx5e_tc_get_encap_attr() helper instead of assuming the encap action is always on the default attribute. However, the cited commit didn't modify mlx5e_invalidate_encap(), which prevents it from destroying the correct modify header action and leads to a warning [0]. Fix the issue by using the correct attribute. [0]: Feb 21 09:47:35 c-237-177-40-045 kernel: WARNING: CPU: 17 PID: 654 at drivers/net/ethernet/mellanox/mlx5/core/en_tc.c:684 mlx5e_tc_attach_mod_hdr+0x1cc/0x230 [mlx5_core] Feb 21 09:47:35 c-237-177-40-045 kernel: RIP: 0010:mlx5e_tc_attach_mod_hdr+0x1cc/0x230 [mlx5_core] Feb 21 09:47:35 c-237-177-40-045 kernel: Call Trace: Feb 21 09:47:35 c-237-177-40-045 kernel: Feb 21 09:47:35 c-237-177-40-045 kernel: mlx5e_tc_fib_event_work+0x8e3/0x1f60 [mlx5_core] Feb 21 09:47:35 c-237-177-40-045 kernel: ? mlx5e_take_all_encap_flows+0xe0/0xe0 [mlx5_core] Feb 21 09:47:35 c-237-177-40-045 kernel: ? lock_downgrade+0x6d0/0x6d0 Feb 21 09:47:35 c-237-177-40-045 kernel: ? lockdep_hardirqs_on_prepare+0x273/0x3f0 Feb 21 09:47:35 c-237-177-40-045 kernel: ? lockdep_hardirqs_on_prepare+0x273/0x3f0 Feb 21 09:47:35 c-237-177-40-045 kernel: process_one_work+0x7c2/0x1310 Feb 21 09:47:35 c-237-177-40-045 kernel: ? lockdep_hardirqs_on_prepare+0x3f0/0x3f0 Feb 21 09:47:35 c-237-177-40-045 kernel: ? pwq_dec_nr_in_flight+0x230/0x230 Feb 21 09:47:35 c-237-177-40-045 kernel: ? rwlock_bug.part.0+0x90/0x90 Feb 21 09:47:35 c-237-177-40-045 kernel: worker_thread+0x59d/0xec0 Feb 21 09:47:35 c-237-177-40-045 kernel: ? __kthread_parkme+0xd9/0x1d0 Fixes: 8300f225268b ("net/mlx5e: Create new flow attr for multi table actions") Signed-off-by: Vlad Buslov Reviewed-by: Roi Dayan Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c index 20c2d2ecaf93..6a052c6cfc15 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c @@ -1369,11 +1369,13 @@ static void mlx5e_invalidate_encap(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow; list_for_each_entry(flow, encap_flows, tmp_list) { - struct mlx5_flow_attr *attr = flow->attr; struct mlx5_esw_flow_attr *esw_attr; + struct mlx5_flow_attr *attr; if (!mlx5e_is_offloaded_flow(flow)) continue; + + attr = mlx5e_tc_get_encap_attr(flow); esw_attr = attr->esw_attr; if (flow_flag_test(flow, SLOW)) From a65735148e0328f80c0f72f9f8d2f609bfcf4aff Mon Sep 17 00:00:00 2001 From: Roi Dayan Date: Mon, 1 May 2023 14:37:56 +0300 Subject: [PATCH 37/79] net/mlx5: Fix error message when failing to allocate device memory Fix the spacing in the error message and print the correct error code, taken from the returned error pointer.
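For illustration, a small userspace model of the ERR_PTR convention this fix relies on (simplified from the kernel's include/linux/err.h): on failure dev->dm carries an encoded errno, not a valid pointer, so the message must print PTR_ERR() with %ld:

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *dm_create(void)
    {
        return ERR_PTR(-ENOMEM); /* what a failing create would return */
    }

    int main(void)
    {
        void *dm = dm_create();

        if (IS_ERR(dm))
            /* note the space before the code and the %ld of the
             * decoded errno */
            printf("Failed to init device memory %ld\n", PTR_ERR(dm));
        return 0;
    }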
Fixes: c9b9dcb430b3 ("net/mlx5: Move device memory management to mlx5_core") Signed-off-by: Roi Dayan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 995eb2d5ace0..a7eb65cd0bdd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1049,7 +1049,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev) dev->dm = mlx5_dm_create(dev); if (IS_ERR(dev->dm)) - mlx5_core_warn(dev, "Failed to init device memory%d\n", err); + mlx5_core_warn(dev, "Failed to init device memory %ld\n", PTR_ERR(dev->dm)); dev->tracer = mlx5_fw_tracer_create(dev); dev->hv_vhca = mlx5_hv_vhca_create(dev); From 691c041bf20899fc13c793f92ba61ab660fa3a30 Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Fri, 31 Mar 2023 14:20:51 +0200 Subject: [PATCH 38/79] net/mlx5e: Fix deadlock in tc route query code Cited commit causes ABBA deadlock[0] when peer flows are created while holding the devcom rw semaphore. Due to peer flows offload implementation the lock is taken much higher up the call chain and there is no obvious way to easily fix the deadlock. Instead, since tc route query code needs the peer eswitch structure only to perform a lookup in xarray and doesn't perform any sleeping operations with it, refactor the code for lockless execution in following ways: - RCUify the devcom 'data' pointer. When resetting the pointer synchronously wait for RCU grace period before returning. This is fine since devcom is currently only used for synchronization of pairing/unpairing of eswitches which is rare and already expensive as-is. - Wrap all usages of 'paired' boolean in {READ|WRITE}_ONCE(). The flag has already been used in some unlocked contexts without proper annotations (e.g. users of mlx5_devcom_is_paired() function), but it wasn't an issue since all relevant code paths checked it again after obtaining the devcom semaphore. Now it is also used by mlx5_devcom_get_peer_data_rcu() as "best effort" check to return NULL when devcom is being unpaired. Note that while RCU read lock doesn't prevent the unpaired flag from being changed concurrently it still guarantees that reader can continue to use 'data'. - Refactor mlx5e_tc_query_route_vport() function to use new mlx5_devcom_get_peer_data_rcu() API which fixes the deadlock. [0]: [ 164.599612] ====================================================== [ 164.600142] WARNING: possible circular locking dependency detected [ 164.600667] 6.3.0-rc3+ #1 Not tainted [ 164.601021] ------------------------------------------------------ [ 164.601557] handler1/3456 is trying to acquire lock: [ 164.601998] ffff88811f1714b0 (&esw->offloads.encap_tbl_lock){+.+.}-{3:3}, at: mlx5e_attach_encap+0xd8/0x8b0 [mlx5_core] [ 164.603078] but task is already holding lock: [ 164.603617] ffff88810137fc98 (&comp->sem){++++}-{3:3}, at: mlx5_devcom_get_peer_data+0x37/0x80 [mlx5_core] [ 164.604459] which lock already depends on the new lock. 
[ 164.605190] the existing dependency chain (in reverse order) is: [ 164.605848] -> #1 (&comp->sem){++++}-{3:3}: [ 164.606380] down_read+0x39/0x50 [ 164.606772] mlx5_devcom_get_peer_data+0x37/0x80 [mlx5_core] [ 164.607336] mlx5e_tc_query_route_vport+0x86/0xc0 [mlx5_core] [ 164.607914] mlx5e_tc_tun_route_lookup+0x1a4/0x1d0 [mlx5_core] [ 164.608495] mlx5e_attach_decap_route+0xc6/0x1e0 [mlx5_core] [ 164.609063] mlx5e_tc_add_fdb_flow+0x1ea/0x360 [mlx5_core] [ 164.609627] __mlx5e_add_fdb_flow+0x2d2/0x430 [mlx5_core] [ 164.610175] mlx5e_configure_flower+0x952/0x1a20 [mlx5_core] [ 164.610741] tc_setup_cb_add+0xd4/0x200 [ 164.611146] fl_hw_replace_filter+0x14c/0x1f0 [cls_flower] [ 164.611661] fl_change+0xc95/0x18a0 [cls_flower] [ 164.612116] tc_new_tfilter+0x3fc/0xd20 [ 164.612516] rtnetlink_rcv_msg+0x418/0x5b0 [ 164.612936] netlink_rcv_skb+0x54/0x100 [ 164.613339] netlink_unicast+0x190/0x250 [ 164.613746] netlink_sendmsg+0x245/0x4a0 [ 164.614150] sock_sendmsg+0x38/0x60 [ 164.614522] ____sys_sendmsg+0x1d0/0x1e0 [ 164.614934] ___sys_sendmsg+0x80/0xc0 [ 164.615320] __sys_sendmsg+0x51/0x90 [ 164.615701] do_syscall_64+0x3d/0x90 [ 164.616083] entry_SYSCALL_64_after_hwframe+0x46/0xb0 [ 164.616568] -> #0 (&esw->offloads.encap_tbl_lock){+.+.}-{3:3}: [ 164.617210] __lock_acquire+0x159e/0x26e0 [ 164.617638] lock_acquire+0xc2/0x2a0 [ 164.618018] __mutex_lock+0x92/0xcd0 [ 164.618401] mlx5e_attach_encap+0xd8/0x8b0 [mlx5_core] [ 164.618943] post_process_attr+0x153/0x2d0 [mlx5_core] [ 164.619471] mlx5e_tc_add_fdb_flow+0x164/0x360 [mlx5_core] [ 164.620021] __mlx5e_add_fdb_flow+0x2d2/0x430 [mlx5_core] [ 164.620564] mlx5e_configure_flower+0xe33/0x1a20 [mlx5_core] [ 164.621125] tc_setup_cb_add+0xd4/0x200 [ 164.621531] fl_hw_replace_filter+0x14c/0x1f0 [cls_flower] [ 164.622047] fl_change+0xc95/0x18a0 [cls_flower] [ 164.622500] tc_new_tfilter+0x3fc/0xd20 [ 164.622906] rtnetlink_rcv_msg+0x418/0x5b0 [ 164.623324] netlink_rcv_skb+0x54/0x100 [ 164.623727] netlink_unicast+0x190/0x250 [ 164.624138] netlink_sendmsg+0x245/0x4a0 [ 164.624544] sock_sendmsg+0x38/0x60 [ 164.624919] ____sys_sendmsg+0x1d0/0x1e0 [ 164.625340] ___sys_sendmsg+0x80/0xc0 [ 164.625731] __sys_sendmsg+0x51/0x90 [ 164.626117] do_syscall_64+0x3d/0x90 [ 164.626502] entry_SYSCALL_64_after_hwframe+0x46/0xb0 [ 164.626995] other info that might help us debug this: [ 164.627725] Possible unsafe locking scenario: [ 164.628268] CPU0 CPU1 [ 164.628683] ---- ---- [ 164.629098] lock(&comp->sem); [ 164.629421] lock(&esw->offloads.encap_tbl_lock); [ 164.630066] lock(&comp->sem); [ 164.630555] lock(&esw->offloads.encap_tbl_lock); [ 164.630993] *** DEADLOCK *** [ 164.631575] 3 locks held by handler1/3456: [ 164.631962] #0: ffff888124b75130 (&block->cb_lock){++++}-{3:3}, at: tc_setup_cb_add+0x5b/0x200 [ 164.632703] #1: ffff888116e512b8 (&esw->mode_lock){++++}-{3:3}, at: mlx5_esw_hold+0x39/0x50 [mlx5_core] [ 164.633552] #2: ffff88810137fc98 (&comp->sem){++++}-{3:3}, at: mlx5_devcom_get_peer_data+0x37/0x80 [mlx5_core] [ 164.634435] stack backtrace: [ 164.634883] CPU: 17 PID: 3456 Comm: handler1 Not tainted 6.3.0-rc3+ #1 [ 164.635431] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.13.0-0-gf21b5a4aeb02-prebuilt.qemu.org 04/01/2014 [ 164.636340] Call Trace: [ 164.636616] [ 164.636863] dump_stack_lvl+0x47/0x70 [ 164.637217] check_noncircular+0xfe/0x110 [ 164.637601] __lock_acquire+0x159e/0x26e0 [ 164.637977] ? mlx5_cmd_set_fte+0x5b0/0x830 [mlx5_core] [ 164.638472] lock_acquire+0xc2/0x2a0 [ 164.638828] ? mlx5e_attach_encap+0xd8/0x8b0 [mlx5_core] [ 164.639339] ? 
lock_is_held_type+0x98/0x110 [ 164.639728] __mutex_lock+0x92/0xcd0 [ 164.640074] ? mlx5e_attach_encap+0xd8/0x8b0 [mlx5_core] [ 164.640576] ? __lock_acquire+0x382/0x26e0 [ 164.640958] ? mlx5e_attach_encap+0xd8/0x8b0 [mlx5_core] [ 164.641468] ? mlx5e_attach_encap+0xd8/0x8b0 [mlx5_core] [ 164.641965] mlx5e_attach_encap+0xd8/0x8b0 [mlx5_core] [ 164.642454] ? lock_release+0xbf/0x240 [ 164.642819] post_process_attr+0x153/0x2d0 [mlx5_core] [ 164.643318] mlx5e_tc_add_fdb_flow+0x164/0x360 [mlx5_core] [ 164.643835] __mlx5e_add_fdb_flow+0x2d2/0x430 [mlx5_core] [ 164.644340] mlx5e_configure_flower+0xe33/0x1a20 [mlx5_core] [ 164.644862] ? lock_acquire+0xc2/0x2a0 [ 164.645219] tc_setup_cb_add+0xd4/0x200 [ 164.645588] fl_hw_replace_filter+0x14c/0x1f0 [cls_flower] [ 164.646067] fl_change+0xc95/0x18a0 [cls_flower] [ 164.646488] tc_new_tfilter+0x3fc/0xd20 [ 164.646861] ? tc_del_tfilter+0x810/0x810 [ 164.647236] rtnetlink_rcv_msg+0x418/0x5b0 [ 164.647621] ? rtnl_setlink+0x160/0x160 [ 164.647982] netlink_rcv_skb+0x54/0x100 [ 164.648348] netlink_unicast+0x190/0x250 [ 164.648722] netlink_sendmsg+0x245/0x4a0 [ 164.649090] sock_sendmsg+0x38/0x60 [ 164.649434] ____sys_sendmsg+0x1d0/0x1e0 [ 164.649804] ? copy_msghdr_from_user+0x6d/0xa0 [ 164.650213] ___sys_sendmsg+0x80/0xc0 [ 164.650563] ? lock_acquire+0xc2/0x2a0 [ 164.650926] ? lock_acquire+0xc2/0x2a0 [ 164.651286] ? __fget_files+0x5/0x190 [ 164.651644] ? find_held_lock+0x2b/0x80 [ 164.652006] ? __fget_files+0xb9/0x190 [ 164.652365] ? lock_release+0xbf/0x240 [ 164.652723] ? __fget_files+0xd3/0x190 [ 164.653079] __sys_sendmsg+0x51/0x90 [ 164.653435] do_syscall_64+0x3d/0x90 [ 164.653784] entry_SYSCALL_64_after_hwframe+0x46/0xb0 [ 164.654229] RIP: 0033:0x7f378054f8bd [ 164.654577] Code: 28 89 54 24 1c 48 89 74 24 10 89 7c 24 08 e8 6a c3 f4 ff 8b 54 24 1c 48 8b 74 24 10 41 89 c0 8b 7c 24 08 b8 2e 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 33 44 89 c7 48 89 44 24 08 e8 be c3 f4 ff 48 [ 164.656041] RSP: 002b:00007f377fa114b0 EFLAGS: 00000293 ORIG_RAX: 000000000000002e [ 164.656701] RAX: ffffffffffffffda RBX: 0000000000000001 RCX: 00007f378054f8bd [ 164.657297] RDX: 0000000000000000 RSI: 00007f377fa11540 RDI: 0000000000000014 [ 164.657885] RBP: 00007f377fa12278 R08: 0000000000000000 R09: 000000000000015c [ 164.658472] R10: 00007f377fa123d0 R11: 0000000000000293 R12: 0000560962d99bd0 [ 164.665317] R13: 0000000000000000 R14: 0000560962d99bd0 R15: 00007f377fa11540 Fixes: f9d196bd632b ("net/mlx5e: Use correct eswitch for stack devices with lag") Signed-off-by: Vlad Buslov Reviewed-by: Roi Dayan Reviewed-by: Shay Drory Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en_tc.c | 19 ++++---- .../ethernet/mellanox/mlx5/core/lib/devcom.c | 48 ++++++++++++++----- .../ethernet/mellanox/mlx5/core/lib/devcom.h | 1 + 3 files changed, 48 insertions(+), 20 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 65fe40f55d84..416ab6b6da97 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1665,11 +1665,9 @@ bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport) { struct mlx5e_priv *out_priv, *route_priv; - struct mlx5_devcom *devcom = NULL; struct mlx5_core_dev *route_mdev; struct mlx5_eswitch *esw; u16 vhca_id; - int err; out_priv = netdev_priv(out_dev); esw = 
out_priv->mdev->priv.eswitch; @@ -1678,6 +1676,9 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id); if (mlx5_lag_is_active(out_priv->mdev)) { + struct mlx5_devcom *devcom; + int err; + /* In lag case we may get devices from different eswitch instances. * If we failed to get vport num, it means, mostly, that we on the wrong * eswitch. @@ -1686,16 +1687,16 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro if (err != -ENOENT) return err; + rcu_read_lock(); devcom = out_priv->mdev->priv.devcom; - esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); - if (!esw) - return -ENODEV; + esw = mlx5_devcom_get_peer_data_rcu(devcom, MLX5_DEVCOM_ESW_OFFLOADS); + err = esw ? mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport) : -ENODEV; + rcu_read_unlock(); + + return err; } - err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport); - if (devcom) - mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); - return err; + return mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport); } static int diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c index adefde3ea941..070d55f13419 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c @@ -13,7 +13,7 @@ static LIST_HEAD(devcom_list); struct mlx5_devcom_component { struct { - void *data; + void __rcu *data; } device[MLX5_DEVCOM_PORTS_SUPPORTED]; mlx5_devcom_event_handler_t handler; @@ -162,7 +162,7 @@ void mlx5_devcom_register_component(struct mlx5_devcom *devcom, comp = &devcom->priv->components[id]; down_write(&comp->sem); comp->handler = handler; - comp->device[devcom->idx].data = data; + rcu_assign_pointer(comp->device[devcom->idx].data, data); up_write(&comp->sem); } @@ -176,8 +176,9 @@ void mlx5_devcom_unregister_component(struct mlx5_devcom *devcom, comp = &devcom->priv->components[id]; down_write(&comp->sem); - comp->device[devcom->idx].data = NULL; + RCU_INIT_POINTER(comp->device[devcom->idx].data, NULL); up_write(&comp->sem); + synchronize_rcu(); } int mlx5_devcom_send_event(struct mlx5_devcom *devcom, @@ -193,12 +194,15 @@ int mlx5_devcom_send_event(struct mlx5_devcom *devcom, comp = &devcom->priv->components[id]; down_write(&comp->sem); - for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) - if (i != devcom->idx && comp->device[i].data) { - err = comp->handler(event, comp->device[i].data, - event_data); + for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) { + void *data = rcu_dereference_protected(comp->device[i].data, + lockdep_is_held(&comp->sem)); + + if (i != devcom->idx && data) { + err = comp->handler(event, data, event_data); break; } + } up_write(&comp->sem); return err; @@ -213,7 +217,7 @@ void mlx5_devcom_set_paired(struct mlx5_devcom *devcom, comp = &devcom->priv->components[id]; WARN_ON(!rwsem_is_locked(&comp->sem)); - comp->paired = paired; + WRITE_ONCE(comp->paired, paired); } bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom, @@ -222,7 +226,7 @@ bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom, if (IS_ERR_OR_NULL(devcom)) return false; - return devcom->priv->components[id].paired; + return READ_ONCE(devcom->priv->components[id].paired); } void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom, @@ -236,7 +240,7 @@ void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom, comp = &devcom->priv->components[id]; down_read(&comp->sem); - if (!comp->paired) { + if 
(!READ_ONCE(comp->paired)) { up_read(&comp->sem); return NULL; } @@ -245,7 +249,29 @@ void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom, if (i != devcom->idx) break; - return comp->device[i].data; + return rcu_dereference_protected(comp->device[i].data, lockdep_is_held(&comp->sem)); +} + +void *mlx5_devcom_get_peer_data_rcu(struct mlx5_devcom *devcom, enum mlx5_devcom_components id) +{ + struct mlx5_devcom_component *comp; + int i; + + if (IS_ERR_OR_NULL(devcom)) + return NULL; + + for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) + if (i != devcom->idx) + break; + + comp = &devcom->priv->components[id]; + /* This can change concurrently, however 'data' pointer will remain + * valid for the duration of RCU read section. + */ + if (!READ_ONCE(comp->paired)) + return NULL; + + return rcu_dereference(comp->device[i].data); } void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h index 94313c18bb64..9a496f4722da 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h @@ -41,6 +41,7 @@ bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom, void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom, enum mlx5_devcom_components id); +void *mlx5_devcom_get_peer_data_rcu(struct mlx5_devcom *devcom, enum mlx5_devcom_components id); void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom, enum mlx5_devcom_components id); From 7aa50380191635e5897a773f272829cc961a2be5 Mon Sep 17 00:00:00 2001 From: Rahul Rameshbabu Date: Tue, 21 Feb 2023 16:18:48 -0800 Subject: [PATCH 39/79] net/mlx5e: Fix SQ wake logic in ptp napi_poll context Check in the mlx5e_ptp_poll_ts_cq context if the ptp tx sq should be woken up. Before change, the ptp tx sq may never wake up if the ptp tx ts skb fifo is full when mlx5e_poll_tx_cq checks if the queue should be woken up. 
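For illustration, a minimal sketch of the refactor: both completion paths call one shared wake helper, so a queue stopped for lack of timestamp-fifo room is re-checked no matter which CQ drains first (the queue model is illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    struct sq {
        bool stopped;
        bool has_room;      /* WQ space */
        bool fifo_has_room; /* ptp ts skb fifo space */
    };

    /* Single wake check shared by every completion path. */
    static void txqsq_wake(struct sq *sq)
    {
        if (sq->stopped && sq->has_room && sq->fifo_has_room) {
            sq->stopped = false;
            printf("queue woken\n");
        }
    }

    static void poll_tx_cq(struct sq *sq)
    {
        /* ... reclaim completed WQEs, freeing WQ space ... */
        sq->has_room = true;
        txqsq_wake(sq);
    }

    static void poll_ts_cq(struct sq *sq)
    {
        /* ... consume port timestamps, freeing fifo slots ... */
        sq->fifo_has_room = true;
        txqsq_wake(sq); /* the fix: also re-check on this path */
    }

    int main(void)
    {
        struct sq sq = { .stopped = true };

        poll_tx_cq(&sq); /* fifo still full: stays stopped */
        poll_ts_cq(&sq); /* fifo drained: queue wakes here */
        return 0;
    }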
Fixes: 1880bc4e4a96 ("net/mlx5e: Add TX port timestamp support") Signed-off-by: Rahul Rameshbabu Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en/ptp.c | 2 ++ .../net/ethernet/mellanox/mlx5/core/en/txrx.h | 2 ++ .../net/ethernet/mellanox/mlx5/core/en_tx.c | 19 ++++++++++++------- 3 files changed, 16 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c index eb5abd0e55d9..3cbebfba582b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c @@ -175,6 +175,8 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget) /* ensure cq space is freed before enabling more cqes */ wmb(); + mlx5e_txqsq_wake(&ptpsq->txqsq); + return work_done == budget; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index 47381e949f1f..879d698b6119 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -193,6 +193,8 @@ static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size) return pi; } +void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq); + static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index df5e780e8e6a..c7eb6b238c2b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -762,6 +762,17 @@ static void mlx5e_tx_wi_consume_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_t } } +void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq) +{ + if (netif_tx_queue_stopped(sq->txq) && + mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) && + mlx5e_ptpsq_fifo_has_room(sq) && + !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) { + netif_tx_wake_queue(sq->txq); + sq->stats->wake++; + } +} + bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) { struct mlx5e_sq_stats *stats; @@ -861,13 +872,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) netdev_tx_completed_queue(sq->txq, npkts, nbytes); - if (netif_tx_queue_stopped(sq->txq) && - mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) && - mlx5e_ptpsq_fifo_has_room(sq) && - !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) { - netif_tx_wake_queue(sq->txq); - stats->wake++; - } + mlx5e_txqsq_wake(sq); return (i == MLX5E_TX_CQ_POLL_BUDGET); } From dfa1e46d6093831b9d49f0f350227a1d13644a2f Mon Sep 17 00:00:00 2001 From: Paul Blakey Date: Wed, 26 Apr 2023 16:04:48 +0300 Subject: [PATCH 40/79] net/mlx5e: TC, Fix using eswitch mapping in nic mode The cited patch uses the eswitch object mapping pool while in nic mode, where it isn't initialized. This results in the trace below [0]. Fix that by using either the nic or the eswitch object mapping pool, depending on whether the eswitch is enabled.
[0]: [ 826.446057] ================================================================== [ 826.446729] BUG: KASAN: slab-use-after-free in mlx5_add_flow_rules+0x30/0x490 [mlx5_core] [ 826.447515] Read of size 8 at addr ffff888194485830 by task tc/6233 [ 826.448243] CPU: 16 PID: 6233 Comm: tc Tainted: G W 6.3.0-rc6+ #1 [ 826.448890] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.13.0-0-gf21b5a4aeb02-prebuilt.qemu.org 04/01/2014 [ 826.449785] Call Trace: [ 826.450052] [ 826.450302] dump_stack_lvl+0x33/0x50 [ 826.450650] print_report+0xc2/0x610 [ 826.450998] ? __virt_addr_valid+0xb1/0x130 [ 826.451385] ? mlx5_add_flow_rules+0x30/0x490 [mlx5_core] [ 826.451935] kasan_report+0xae/0xe0 [ 826.452276] ? mlx5_add_flow_rules+0x30/0x490 [mlx5_core] [ 826.452829] mlx5_add_flow_rules+0x30/0x490 [mlx5_core] [ 826.453368] ? __kmalloc_node+0x5a/0x120 [ 826.453733] esw_add_restore_rule+0x20f/0x270 [mlx5_core] [ 826.454288] ? mlx5_eswitch_add_send_to_vport_meta_rule+0x260/0x260 [mlx5_core] [ 826.455011] ? mutex_unlock+0x80/0xd0 [ 826.455361] ? __mutex_unlock_slowpath.constprop.0+0x210/0x210 [ 826.455862] ? mapping_add+0x2cb/0x440 [mlx5_core] [ 826.456425] mlx5e_tc_action_miss_mapping_get+0x139/0x180 [mlx5_core] [ 826.457058] ? mlx5e_tc_update_skb_nic+0xb0/0xb0 [mlx5_core] [ 826.457636] ? __kasan_kmalloc+0x77/0x90 [ 826.458000] ? __kmalloc+0x57/0x120 [ 826.458336] mlx5_tc_ct_flow_offload+0x325/0xe40 [mlx5_core] [ 826.458916] ? ct_kernel_enter.constprop.0+0x48/0xa0 [ 826.459360] ? mlx5_tc_ct_parse_action+0xf0/0xf0 [mlx5_core] [ 826.459933] ? mlx5e_mod_hdr_attach+0x491/0x520 [mlx5_core] [ 826.460507] ? mlx5e_mod_hdr_get+0x12/0x20 [mlx5_core] [ 826.461046] ? mlx5e_tc_attach_mod_hdr+0x154/0x170 [mlx5_core] [ 826.461635] mlx5e_configure_flower+0x969/0x2110 [mlx5_core] [ 826.462217] ? _raw_spin_lock_bh+0x85/0xe0 [ 826.462597] ? __mlx5e_add_fdb_flow+0x750/0x750 [mlx5_core] [ 826.463163] ? kasan_save_stack+0x2e/0x40 [ 826.463534] ? down_read+0x115/0x1b0 [ 826.463878] ? down_write_killable+0x110/0x110 [ 826.464288] ? tc_setup_action.part.0+0x9f/0x3b0 [ 826.464701] ? mlx5e_is_uplink_rep+0x4c/0x90 [mlx5_core] [ 826.465253] ? mlx5e_tc_reoffload_flows_work+0x130/0x130 [mlx5_core] [ 826.465878] tc_setup_cb_add+0x112/0x250 [ 826.466247] fl_hw_replace_filter+0x230/0x310 [cls_flower] [ 826.466724] ? fl_hw_destroy_filter+0x1a0/0x1a0 [cls_flower] [ 826.467212] fl_change+0x14e1/0x2030 [cls_flower] [ 826.467636] ? sock_def_readable+0x89/0x120 [ 826.468019] ? fl_tmplt_create+0x2d0/0x2d0 [cls_flower] [ 826.468509] ? kasan_unpoison+0x23/0x50 [ 826.468873] ? get_random_u16+0x180/0x180 [ 826.469244] ? __radix_tree_lookup+0x2b/0x130 [ 826.469640] ? fl_get+0x7b/0x140 [cls_flower] [ 826.470042] ? fl_mask_put+0x200/0x200 [cls_flower] [ 826.470478] ? __mutex_unlock_slowpath.constprop.0+0x210/0x210 [ 826.470973] ? fl_tmplt_create+0x2d0/0x2d0 [cls_flower] [ 826.471427] tc_new_tfilter+0x644/0x1050 [ 826.471795] ? tc_get_tfilter+0x860/0x860 [ 826.472170] ? __thaw_task+0x130/0x130 [ 826.472525] ? arch_stack_walk+0x98/0xf0 [ 826.472892] ? cap_capable+0x9f/0xd0 [ 826.473235] ? security_capable+0x47/0x60 [ 826.473608] rtnetlink_rcv_msg+0x1d5/0x550 [ 826.473985] ? rtnl_calcit.isra.0+0x1f0/0x1f0 [ 826.474383] ? __stack_depot_save+0x35/0x4c0 [ 826.474779] ? kasan_save_stack+0x2e/0x40 [ 826.475149] ? kasan_save_stack+0x1e/0x40 [ 826.475518] ? __kasan_record_aux_stack+0x9f/0xb0 [ 826.475939] ? task_work_add+0x77/0x1c0 [ 826.476305] netlink_rcv_skb+0xe0/0x210 [ 826.476661] ? rtnl_calcit.isra.0+0x1f0/0x1f0 [ 826.477057] ? 
netlink_ack+0x7c0/0x7c0 [ 826.477412] ? rhashtable_jhash2+0xef/0x150 [ 826.477796] ? _copy_from_iter+0x105/0x770 [ 826.484386] netlink_unicast+0x346/0x490 [ 826.484755] ? netlink_attachskb+0x400/0x400 [ 826.485145] ? kernel_text_address+0xc2/0xd0 [ 826.485535] netlink_sendmsg+0x3b0/0x6c0 [ 826.485902] ? kernel_text_address+0xc2/0xd0 [ 826.486296] ? netlink_unicast+0x490/0x490 [ 826.486671] ? iovec_from_user.part.0+0x7a/0x1a0 [ 826.487083] ? netlink_unicast+0x490/0x490 [ 826.487461] sock_sendmsg+0x73/0xc0 [ 826.487803] ____sys_sendmsg+0x364/0x380 [ 826.488186] ? import_iovec+0x7/0x10 [ 826.488531] ? kernel_sendmsg+0x30/0x30 [ 826.488893] ? __copy_msghdr+0x180/0x180 [ 826.489258] ? kasan_save_stack+0x2e/0x40 [ 826.489629] ? kasan_save_stack+0x1e/0x40 [ 826.490002] ? __kasan_record_aux_stack+0x9f/0xb0 [ 826.490424] ? __call_rcu_common.constprop.0+0x46/0x580 [ 826.490876] ___sys_sendmsg+0xdf/0x140 [ 826.491231] ? copy_msghdr_from_user+0x110/0x110 [ 826.491649] ? fget_raw+0x120/0x120 [ 826.491988] ? ___sys_recvmsg+0xd9/0x130 [ 826.492355] ? folio_batch_add_and_move+0x80/0xa0 [ 826.492776] ? _raw_spin_lock+0x7a/0xd0 [ 826.493137] ? _raw_spin_lock+0x7a/0xd0 [ 826.493500] ? _raw_read_lock_irq+0x30/0x30 [ 826.493880] ? kasan_set_track+0x21/0x30 [ 826.494249] ? kasan_save_free_info+0x2a/0x40 [ 826.494650] ? do_sys_openat2+0xff/0x270 [ 826.495016] ? __fget_light+0x1b5/0x200 [ 826.495377] ? __virt_addr_valid+0xb1/0x130 [ 826.495763] __sys_sendmsg+0xb2/0x130 [ 826.496118] ? __sys_sendmsg_sock+0x20/0x20 [ 826.496501] ? __x64_sys_rseq+0x2e0/0x2e0 [ 826.496874] ? do_user_addr_fault+0x276/0x820 [ 826.497273] ? fpregs_assert_state_consistent+0x52/0x60 [ 826.497727] ? exit_to_user_mode_prepare+0x30/0x120 [ 826.498158] do_syscall_64+0x3d/0x90 [ 826.498502] entry_SYSCALL_64_after_hwframe+0x46/0xb0 [ 826.498949] RIP: 0033:0x7f9b67f4f887 [ 826.499294] Code: 0a 00 f7 d8 64 89 02 48 c7 c0 ff ff ff ff eb b9 0f 1f 00 f3 0f 1e fa 64 8b 04 25 18 00 00 00 85 c0 75 10 b8 2e 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 51 c3 48 83 ec 28 89 54 24 1c 48 89 74 24 10 [ 826.500742] RSP: 002b:00007fff5d1a5498 EFLAGS: 00000246 ORIG_RAX: 000000000000002e [ 826.501395] RAX: ffffffffffffffda RBX: 0000000064413ce6 RCX: 00007f9b67f4f887 [ 826.501975] RDX: 0000000000000000 RSI: 00007fff5d1a5500 RDI: 0000000000000003 [ 826.502556] RBP: 0000000000000000 R08: 0000000000000001 R09: 0000000000000001 [ 826.503135] R10: 00007f9b67e08708 R11: 0000000000000246 R12: 0000000000000001 [ 826.503714] R13: 0000000000000001 R14: 00007fff5d1a9800 R15: 0000000000485400 [ 826.504304] [ 826.504753] Allocated by task 3764: [ 826.505090] kasan_save_stack+0x1e/0x40 [ 826.505453] kasan_set_track+0x21/0x30 [ 826.505810] __kasan_kmalloc+0x77/0x90 [ 826.506164] __mlx5_create_flow_table+0x16d/0xbb0 [mlx5_core] [ 826.506742] esw_offloads_enable+0x60d/0xfb0 [mlx5_core] [ 826.507292] mlx5_eswitch_enable_locked+0x4d3/0x680 [mlx5_core] [ 826.507885] mlx5_devlink_eswitch_mode_set+0x2a3/0x580 [mlx5_core] [ 826.508513] devlink_nl_cmd_eswitch_set_doit+0xdf/0x1f0 [ 826.508969] genl_family_rcv_msg_doit.isra.0+0x146/0x1c0 [ 826.509427] genl_rcv_msg+0x28d/0x3e0 [ 826.509772] netlink_rcv_skb+0xe0/0x210 [ 826.510133] genl_rcv+0x24/0x40 [ 826.510448] netlink_unicast+0x346/0x490 [ 826.510810] netlink_sendmsg+0x3b0/0x6c0 [ 826.511179] sock_sendmsg+0x73/0xc0 [ 826.511519] __sys_sendto+0x18d/0x220 [ 826.511867] __x64_sys_sendto+0x72/0x80 [ 826.512232] do_syscall_64+0x3d/0x90 [ 826.512576] entry_SYSCALL_64_after_hwframe+0x46/0xb0 [ 826.513220] Freed by task 5674: [ 826.513535] 
kasan_save_stack+0x1e/0x40 [ 826.513893] kasan_set_track+0x21/0x30 [ 826.514245] kasan_save_free_info+0x2a/0x40 [ 826.514629] ____kasan_slab_free+0x11a/0x1b0 [ 826.515021] __kmem_cache_free+0x14d/0x280 [ 826.515399] tree_put_node+0x109/0x1c0 [mlx5_core] [ 826.515907] mlx5_destroy_flow_table+0x119/0x630 [mlx5_core] [ 826.516481] esw_offloads_steering_cleanup+0xe7/0x150 [mlx5_core] [ 826.517084] esw_offloads_disable+0xe0/0x160 [mlx5_core] [ 826.517632] mlx5_eswitch_disable_locked+0x26c/0x290 [mlx5_core] [ 826.518225] mlx5_devlink_eswitch_mode_set+0x128/0x580 [mlx5_core] [ 826.518834] devlink_nl_cmd_eswitch_set_doit+0xdf/0x1f0 [ 826.519286] genl_family_rcv_msg_doit.isra.0+0x146/0x1c0 [ 826.519748] genl_rcv_msg+0x28d/0x3e0 [ 826.520101] netlink_rcv_skb+0xe0/0x210 [ 826.520458] genl_rcv+0x24/0x40 [ 826.520771] netlink_unicast+0x346/0x490 [ 826.521137] netlink_sendmsg+0x3b0/0x6c0 [ 826.521505] sock_sendmsg+0x73/0xc0 [ 826.521842] __sys_sendto+0x18d/0x220 [ 826.522191] __x64_sys_sendto+0x72/0x80 [ 826.522554] do_syscall_64+0x3d/0x90 [ 826.522894] entry_SYSCALL_64_after_hwframe+0x46/0xb0 [ 826.523540] Last potentially related work creation: [ 826.523969] kasan_save_stack+0x1e/0x40 [ 826.524331] __kasan_record_aux_stack+0x9f/0xb0 [ 826.524739] insert_work+0x30/0x130 [ 826.525078] __queue_work+0x34b/0x690 [ 826.525426] queue_work_on+0x48/0x50 [ 826.525766] __rhashtable_remove_fast_one+0x4af/0x4d0 [mlx5_core] [ 826.526365] del_sw_flow_group+0x1b5/0x270 [mlx5_core] [ 826.526898] tree_put_node+0x109/0x1c0 [mlx5_core] [ 826.527407] esw_offloads_steering_cleanup+0xd3/0x150 [mlx5_core] [ 826.528009] esw_offloads_disable+0xe0/0x160 [mlx5_core] [ 826.528616] mlx5_eswitch_disable_locked+0x26c/0x290 [mlx5_core] [ 826.529218] mlx5_devlink_eswitch_mode_set+0x128/0x580 [mlx5_core] [ 826.529823] devlink_nl_cmd_eswitch_set_doit+0xdf/0x1f0 [ 826.530276] genl_family_rcv_msg_doit.isra.0+0x146/0x1c0 [ 826.530733] genl_rcv_msg+0x28d/0x3e0 [ 826.531079] netlink_rcv_skb+0xe0/0x210 [ 826.531439] genl_rcv+0x24/0x40 [ 826.531755] netlink_unicast+0x346/0x490 [ 826.532123] netlink_sendmsg+0x3b0/0x6c0 [ 826.532487] sock_sendmsg+0x73/0xc0 [ 826.532825] __sys_sendto+0x18d/0x220 [ 826.533175] __x64_sys_sendto+0x72/0x80 [ 826.533533] do_syscall_64+0x3d/0x90 [ 826.533877] entry_SYSCALL_64_after_hwframe+0x46/0xb0 [ 826.534521] The buggy address belongs to the object at ffff888194485800 which belongs to the cache kmalloc-512 of size 512 [ 826.535506] The buggy address is located 48 bytes inside of freed 512-byte region [ffff888194485800, ffff888194485a00) [ 826.536666] The buggy address belongs to the physical page: [ 826.537138] page:00000000d75841dd refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x194480 [ 826.537915] head:00000000d75841dd order:3 entire_mapcount:0 nr_pages_mapped:0 pincount:0 [ 826.538595] flags: 0x200000000010200(slab|head|node=0|zone=2) [ 826.539089] raw: 0200000000010200 ffff888100042c80 ffffea0004523800 dead000000000002 [ 826.539755] raw: 0000000000000000 0000000000200020 00000001ffffffff 0000000000000000 [ 826.540417] page dumped because: kasan: bad access detected [ 826.541095] Memory state around the buggy address: [ 826.541519] ffff888194485700: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc [ 826.542149] ffff888194485780: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc [ 826.542773] >ffff888194485800: fa fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb [ 826.543400] ^ [ 826.543822] ffff888194485880: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb [ 826.544452] ffff888194485900: fb fb fb fb fb 
fb fb fb fb fb fb fb fb fb fb fb [ 826.545079] ================================================================== Fixes: 6702782845a5 ("net/mlx5e: TC, Set CT miss to the specific ct action instance") Signed-off-by: Paul Blakey Reviewed-by: Vlad Buslov Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en_tc.c | 34 +++++++++++++++---- 1 file changed, 27 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 416ab6b6da97..e95414ef1f04 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -5646,22 +5646,43 @@ bool mlx5e_tc_update_skb_nic(struct mlx5_cqe64 *cqe, struct sk_buff *skb) 0, NULL); } +static struct mapping_ctx * +mlx5e_get_priv_obj_mapping(struct mlx5e_priv *priv) +{ + struct mlx5e_tc_table *tc; + struct mlx5_eswitch *esw; + struct mapping_ctx *ctx; + + if (is_mdev_switchdev_mode(priv->mdev)) { + esw = priv->mdev->priv.eswitch; + ctx = esw->offloads.reg_c0_obj_pool; + } else { + tc = mlx5e_fs_get_tc(priv->fs); + ctx = tc->mapping; + } + + return ctx; +} + int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr, u64 act_miss_cookie, u32 *act_miss_mapping) { - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5_mapped_obj mapped_obj = {}; + struct mlx5_eswitch *esw; struct mapping_ctx *ctx; int err; - ctx = esw->offloads.reg_c0_obj_pool; - + ctx = mlx5e_get_priv_obj_mapping(priv); mapped_obj.type = MLX5_MAPPED_OBJ_ACT_MISS; mapped_obj.act_miss_cookie = act_miss_cookie; err = mapping_add(ctx, &mapped_obj, act_miss_mapping); if (err) return err; + if (!is_mdev_switchdev_mode(priv->mdev)) + return 0; + + esw = priv->mdev->priv.eswitch; attr->act_id_restore_rule = esw_add_restore_rule(esw, *act_miss_mapping); if (IS_ERR(attr->act_id_restore_rule)) goto err_rule; @@ -5676,10 +5697,9 @@ err_rule: void mlx5e_tc_action_miss_mapping_put(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr, u32 act_miss_mapping) { - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct mapping_ctx *ctx; + struct mapping_ctx *ctx = mlx5e_get_priv_obj_mapping(priv); - ctx = esw->offloads.reg_c0_obj_pool; - mlx5_del_flow_rules(attr->act_id_restore_rule); + if (is_mdev_switchdev_mode(priv->mdev)) + mlx5_del_flow_rules(attr->act_id_restore_rule); mapping_remove(ctx, act_miss_mapping); } From 8c253dfc89efde6b5faddf9e7400e5d17884e042 Mon Sep 17 00:00:00 2001 From: Shay Drory Date: Mon, 6 Feb 2023 11:52:02 +0200 Subject: [PATCH 41/79] net/mlx5: E-switch, Devcom, sync devcom events and devcom comp register devcom events are sent to all registered component. Following the cited patch, it is possible for two components, e.g.: two eswitches, to send devcom events, while both components are registered. This means eswitch layer will do double un/pairing, which is double allocation and free of resources, even though only one un/pairing is needed. flow example: cpu0 cpu1 ---- ---- mlx5_devlink_eswitch_mode_set(dev0) esw_offloads_devcom_init() mlx5_devcom_register_component(esw0) mlx5_devlink_eswitch_mode_set(dev1) esw_offloads_devcom_init() mlx5_devcom_register_component(esw1) mlx5_devcom_send_event() mlx5_devcom_send_event() Hence, check whether the eswitches are already un/paired before free/allocation of resources. 
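In sketch form, the fix keeps a per-peer 'paired' flag on each eswitch so
that a second PAIR or UNPAIR event becomes a no-op. This is an
illustrative outline only (the elided steps are placeholders); the actual
change is in the diff below:

	case ESW_OFFLOADS_DEVCOM_PAIR:
		if (esw->paired[mlx5_get_dev_index(peer_esw->dev)])
			break;	/* already paired: skip double allocation */
		/* ... set up namespace peering and offload pairing ... */
		esw->paired[mlx5_get_dev_index(peer_esw->dev)] = true;
		peer_esw->paired[mlx5_get_dev_index(esw->dev)] = true;
		break;
	case ESW_OFFLOADS_DEVCOM_UNPAIR:
		if (!esw->paired[mlx5_get_dev_index(peer_esw->dev)])
			break;	/* already unpaired: skip double free */
		esw->paired[mlx5_get_dev_index(peer_esw->dev)] = false;
		peer_esw->paired[mlx5_get_dev_index(esw->dev)] = false;
		/* ... tear down pairing and free resources ... */
		break;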
Fixes: 09b278462f16 ("net: devlink: enable parallel ops on netlink interface")
Signed-off-by: Shay Drory
Reviewed-by: Mark Bloch
Signed-off-by: Saeed Mahameed
---
 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 1 +
 .../net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 9 ++++++++-
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 9f007c5438ee..add6cfa432a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -342,6 +342,7 @@ struct mlx5_eswitch {
 		u32 large_group_num;
 	} params;
 	struct blocking_notifier_head n_head;
+	bool paired[MLX5_MAX_PORTS];
 };
 
 void esw_offloads_disable(struct mlx5_eswitch *esw);

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 7c34c7cf506f..8d19c20d3447 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -2742,6 +2742,9 @@ static int mlx5_esw_offloads_devcom_event(int event,
 		    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
 			break;
 
+		if (esw->paired[mlx5_get_dev_index(peer_esw->dev)])
+			break;
+
 		err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
 		if (err)
 			goto err_out;
@@ -2753,14 +2756,18 @@ static int mlx5_esw_offloads_devcom_event(int event,
 		if (err)
 			goto err_pair;
 
+		esw->paired[mlx5_get_dev_index(peer_esw->dev)] = true;
+		peer_esw->paired[mlx5_get_dev_index(esw->dev)] = true;
 		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
 		break;
 
 	case ESW_OFFLOADS_DEVCOM_UNPAIR:
-		if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+		if (!esw->paired[mlx5_get_dev_index(peer_esw->dev)])
 			break;
 
 		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
+		esw->paired[mlx5_get_dev_index(peer_esw->dev)] = false;
+		peer_esw->paired[mlx5_get_dev_index(esw->dev)] = false;
 		mlx5_esw_offloads_unpair(peer_esw);
 		mlx5_esw_offloads_unpair(esw);
 		mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);

From af87194352cad882d787d06fb7efa714acd95427 Mon Sep 17 00:00:00 2001
From: Shay Drory
Date: Tue, 2 May 2023 13:35:11 +0300
Subject: [PATCH 42/79] net/mlx5: Devcom, fix error flow in mlx5_devcom_register_device

In case devcom allocation fails, mlx5 always frees the priv. However,
this priv might have been allocated by a different thread, and freeing
it might lead to use-after-free bugs. Fix it by freeing the priv only
if it was allocated by the running thread.
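The rule the fix applies can be sketched as follows (illustrative;
new_priv is the existing flag recording that this call, rather than an
earlier one, allocated the list entry):

	devcom = mlx5_devcom_alloc(priv, idx);
	if (!devcom) {
		/* free priv only if this thread allocated it; otherwise
		 * another thread still owns it and may be using it
		 */
		if (new_priv)
			kfree(priv);
		return ERR_PTR(-ENOMEM);
	}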
Fixes: fadd59fc50d0 ("net/mlx5: Introduce inter-device communication mechanism")
Signed-off-by: Shay Drory
Signed-off-by: Saeed Mahameed
---
 drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
index 070d55f13419..8f978491dd32 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
@@ -112,7 +112,8 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
 	priv->devs[idx] = dev;
 	devcom = mlx5_devcom_alloc(priv, idx);
 	if (!devcom) {
-		kfree(priv);
+		if (new_priv)
+			kfree(priv);
 		return ERR_PTR(-ENOMEM);
 	}

From 1f893f57a3bf9fe1f4bcb25b55aea7f7f9712fe7 Mon Sep 17 00:00:00 2001
From: Shay Drory
Date: Tue, 2 May 2023 13:36:42 +0300
Subject: [PATCH 43/79] net/mlx5: Devcom, serialize devcom registration

On the one hand, the mlx5 driver allows probing PFs in parallel. On the
other hand, devcom, which is a shared resource between PFs, is
registered without any lock. This might result in memory problems.
Hence, use the global mlx5_dev_list_lock in order to serialize devcom
registration.

Fixes: fadd59fc50d0 ("net/mlx5: Introduce inter-device communication mechanism")
Signed-off-by: Shay Drory
Reviewed-by: Mark Bloch
Signed-off-by: Saeed Mahameed
---
 .../ethernet/mellanox/mlx5/core/lib/devcom.c | 19 ++++++++++++-----
 1 file changed, 14 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
index 8f978491dd32..b7d779d08d83 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
@@ -3,6 +3,7 @@
 
 #include 
 #include "lib/devcom.h"
+#include "mlx5_core.h"
 
 static LIST_HEAD(devcom_list);
 
@@ -77,6 +78,7 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
 	if (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_DEVCOM_PORTS_SUPPORTED)
 		return NULL;
 
+	mlx5_dev_list_lock();
 	sguid0 = mlx5_query_nic_system_image_guid(dev);
 	list_for_each_entry(iter, &devcom_list, list) {
 		struct mlx5_core_dev *tmp_dev = NULL;
@@ -102,8 +104,10 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
 
 	if (!priv) {
 		priv = mlx5_devcom_list_alloc();
-		if (!priv)
-			return ERR_PTR(-ENOMEM);
+		if (!priv) {
+			devcom = ERR_PTR(-ENOMEM);
+			goto out;
+		}
 
 		idx = 0;
 		new_priv = true;
@@ -114,12 +118,14 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
 	if (!devcom) {
 		if (new_priv)
 			kfree(priv);
-		return ERR_PTR(-ENOMEM);
+		devcom = ERR_PTR(-ENOMEM);
+		goto out;
 	}
 
 	if (new_priv)
 		list_add(&priv->list, &devcom_list);
-
+out:
+	mlx5_dev_list_unlock();
 	return devcom;
 }
 
@@ -132,6 +138,7 @@ void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom)
 	if (IS_ERR_OR_NULL(devcom))
 		return;
 
+	mlx5_dev_list_lock();
 	priv = devcom->priv;
 	priv->devs[devcom->idx] = NULL;
 
@@ -142,10 +149,12 @@ void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom)
 	for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
 		if (priv->devs[i])
 			break;
 
 	if (i != MLX5_DEVCOM_PORTS_SUPPORTED)
-		return;
+		goto out;
 
 	list_del(&priv->list);
 	kfree(priv);
+out:
+	mlx5_dev_list_unlock();
 }
 
 void mlx5_devcom_register_component(struct mlx5_devcom *devcom,

From 9c2d08010963a61a171e8cb2852d3ce015b60cb4 Mon Sep 17 00:00:00 2001
From: Shay Drory
Date: Thu, 13 Apr 2023 22:15:31 +0300
Subject: [PATCH 44/79] net/mlx5: Free irqs only on shutdown callback

Whenever a shutdown is invoked, free
irqs only and keep mlx5_irq synthetic wrapper intact in order to avoid use-after-free on system shutdown. for example: ================================================================== BUG: KASAN: use-after-free in _find_first_bit+0x66/0x80 Read of size 8 at addr ffff88823fc0d318 by task kworker/u192:0/13608 CPU: 25 PID: 13608 Comm: kworker/u192:0 Tainted: G B W O 6.1.21-cloudflare-kasan-2023.3.21 #1 Hardware name: GIGABYTE R162-R2-GEN0/MZ12-HD2-CD, BIOS R14 05/03/2021 Workqueue: mlx5e mlx5e_tx_timeout_work [mlx5_core] Call Trace: dump_stack_lvl+0x34/0x48 print_report+0x170/0x473 ? _find_first_bit+0x66/0x80 kasan_report+0xad/0x130 ? _find_first_bit+0x66/0x80 _find_first_bit+0x66/0x80 mlx5e_open_channels+0x3c5/0x3a10 [mlx5_core] ? console_unlock+0x2fa/0x430 ? _raw_spin_lock_irqsave+0x8d/0xf0 ? _raw_spin_unlock_irqrestore+0x42/0x80 ? preempt_count_add+0x7d/0x150 ? __wake_up_klogd.part.0+0x7d/0xc0 ? vprintk_emit+0xfe/0x2c0 ? mlx5e_trigger_napi_sched+0x40/0x40 [mlx5_core] ? dev_attr_show.cold+0x35/0x35 ? devlink_health_do_dump.part.0+0x174/0x340 ? devlink_health_report+0x504/0x810 ? mlx5e_reporter_tx_timeout+0x29d/0x3a0 [mlx5_core] ? mlx5e_tx_timeout_work+0x17c/0x230 [mlx5_core] ? process_one_work+0x680/0x1050 mlx5e_safe_switch_params+0x156/0x220 [mlx5_core] ? mlx5e_switch_priv_channels+0x310/0x310 [mlx5_core] ? mlx5_eq_poll_irq_disabled+0xb6/0x100 [mlx5_core] mlx5e_tx_reporter_timeout_recover+0x123/0x240 [mlx5_core] ? __mutex_unlock_slowpath.constprop.0+0x2b0/0x2b0 devlink_health_reporter_recover+0xa6/0x1f0 devlink_health_report+0x2f7/0x810 ? vsnprintf+0x854/0x15e0 mlx5e_reporter_tx_timeout+0x29d/0x3a0 [mlx5_core] ? mlx5e_reporter_tx_err_cqe+0x1a0/0x1a0 [mlx5_core] ? mlx5e_tx_reporter_timeout_dump+0x50/0x50 [mlx5_core] ? mlx5e_tx_reporter_dump_sq+0x260/0x260 [mlx5_core] ? newidle_balance+0x9b7/0xe30 ? psi_group_change+0x6a7/0xb80 ? mutex_lock+0x96/0xf0 ? __mutex_lock_slowpath+0x10/0x10 mlx5e_tx_timeout_work+0x17c/0x230 [mlx5_core] process_one_work+0x680/0x1050 worker_thread+0x5a0/0xeb0 ? process_one_work+0x1050/0x1050 kthread+0x2a2/0x340 ? 
kthread_complete_and_exit+0x20/0x20 ret_from_fork+0x22/0x30 Freed by task 1: kasan_save_stack+0x23/0x50 kasan_set_track+0x21/0x30 kasan_save_free_info+0x2a/0x40 ____kasan_slab_free+0x169/0x1d0 slab_free_freelist_hook+0xd2/0x190 __kmem_cache_free+0x1a1/0x2f0 irq_pool_free+0x138/0x200 [mlx5_core] mlx5_irq_table_destroy+0xf6/0x170 [mlx5_core] mlx5_core_eq_free_irqs+0x74/0xf0 [mlx5_core] shutdown+0x194/0x1aa [mlx5_core] pci_device_shutdown+0x75/0x120 device_shutdown+0x35c/0x620 kernel_restart+0x60/0xa0 __do_sys_reboot+0x1cb/0x2c0 do_syscall_64+0x3b/0x90 entry_SYSCALL_64_after_hwframe+0x4b/0xb5 The buggy address belongs to the object at ffff88823fc0d300 which belongs to the cache kmalloc-192 of size 192 The buggy address is located 24 bytes inside of 192-byte region [ffff88823fc0d300, ffff88823fc0d3c0) The buggy address belongs to the physical page: page:0000000010139587 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x23fc0c head:0000000010139587 order:1 compound_mapcount:0 compound_pincount:0 flags: 0x2ffff800010200(slab|head|node=0|zone=2|lastcpupid=0x1ffff) raw: 002ffff800010200 0000000000000000 dead000000000122 ffff88810004ca00 raw: 0000000000000000 0000000000200020 00000001ffffffff 0000000000000000 page dumped because: kasan: bad access detected Memory state around the buggy address: ffff88823fc0d200: fa fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ffff88823fc0d280: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc >ffff88823fc0d300: fa fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ^ ffff88823fc0d380: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc ffff88823fc0d400: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ================================================================== general protection fault, probably for non-canonical address 0xdffffc005c40d7ac: 0000 [#1] PREEMPT SMP KASAN NOPTI KASAN: probably user-memory-access in range [0x00000002e206bd60-0x00000002e206bd67] CPU: 25 PID: 13608 Comm: kworker/u192:0 Tainted: G B W O 6.1.21-cloudflare-kasan-2023.3.21 #1 Hardware name: GIGABYTE R162-R2-GEN0/MZ12-HD2-CD, BIOS R14 05/03/2021 Workqueue: mlx5e mlx5e_tx_timeout_work [mlx5_core] RIP: 0010:__alloc_pages+0x141/0x5c0 Call Trace: ? sysvec_apic_timer_interrupt+0xa0/0xc0 ? asm_sysvec_apic_timer_interrupt+0x16/0x20 ? __alloc_pages_slowpath.constprop.0+0x1ec0/0x1ec0 ? _raw_spin_unlock_irqrestore+0x3d/0x80 __kmalloc_large_node+0x80/0x120 ? kvmalloc_node+0x4e/0x170 __kmalloc_node+0xd4/0x150 kvmalloc_node+0x4e/0x170 mlx5e_open_channels+0x631/0x3a10 [mlx5_core] ? console_unlock+0x2fa/0x430 ? _raw_spin_lock_irqsave+0x8d/0xf0 ? _raw_spin_unlock_irqrestore+0x42/0x80 ? preempt_count_add+0x7d/0x150 ? __wake_up_klogd.part.0+0x7d/0xc0 ? vprintk_emit+0xfe/0x2c0 ? mlx5e_trigger_napi_sched+0x40/0x40 [mlx5_core] ? dev_attr_show.cold+0x35/0x35 ? devlink_health_do_dump.part.0+0x174/0x340 ? devlink_health_report+0x504/0x810 ? mlx5e_reporter_tx_timeout+0x29d/0x3a0 [mlx5_core] ? mlx5e_tx_timeout_work+0x17c/0x230 [mlx5_core] ? process_one_work+0x680/0x1050 mlx5e_safe_switch_params+0x156/0x220 [mlx5_core] ? mlx5e_switch_priv_channels+0x310/0x310 [mlx5_core] ? mlx5_eq_poll_irq_disabled+0xb6/0x100 [mlx5_core] mlx5e_tx_reporter_timeout_recover+0x123/0x240 [mlx5_core] ? __mutex_unlock_slowpath.constprop.0+0x2b0/0x2b0 devlink_health_reporter_recover+0xa6/0x1f0 devlink_health_report+0x2f7/0x810 ? vsnprintf+0x854/0x15e0 mlx5e_reporter_tx_timeout+0x29d/0x3a0 [mlx5_core] ? mlx5e_reporter_tx_err_cqe+0x1a0/0x1a0 [mlx5_core] ? mlx5e_tx_reporter_timeout_dump+0x50/0x50 [mlx5_core] ? 
mlx5e_tx_reporter_dump_sq+0x260/0x260 [mlx5_core] ? newidle_balance+0x9b7/0xe30 ? psi_group_change+0x6a7/0xb80 ? mutex_lock+0x96/0xf0 ? __mutex_lock_slowpath+0x10/0x10 mlx5e_tx_timeout_work+0x17c/0x230 [mlx5_core] process_one_work+0x680/0x1050 worker_thread+0x5a0/0xeb0 ? process_one_work+0x1050/0x1050 kthread+0x2a2/0x340 ? kthread_complete_and_exit+0x20/0x20 ret_from_fork+0x22/0x30 ---[ end trace 0000000000000000 ]--- RIP: 0010:__alloc_pages+0x141/0x5c0 Code: e0 39 a3 96 89 e9 b8 22 01 32 01 83 e1 0f 48 89 fa 01 c9 48 c1 ea 03 d3 f8 83 e0 03 89 44 24 6c 48 b8 00 00 00 00 00 fc ff df <80> 3c 02 00 0f 85 fc 03 00 00 89 e8 4a 8b 14 f5 e0 39 a3 96 4c 89 RSP: 0018:ffff888251f0f438 EFLAGS: 00010202 RAX: dffffc0000000000 RBX: 1ffff1104a3e1e8b RCX: 0000000000000000 RDX: 000000005c40d7ac RSI: 0000000000000003 RDI: 00000002e206bd60 RBP: 0000000000052dc0 R08: ffff8882b0044218 R09: ffff8882b0045e8a R10: fffffbfff300fefc R11: ffff888167af4000 R12: 0000000000000003 R13: 0000000000000000 R14: 00000000696c7070 R15: ffff8882373f4380 FS: 0000000000000000(0000) GS:ffff88bf2be80000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00005641d031eee8 CR3: 0000002e7ca14000 CR4: 0000000000350ee0 Kernel panic - not syncing: Fatal exception Kernel Offset: 0x11000000 from 0xffffffff81000000 (relocation range: 0xffffffff80000000-0xffffffffbfffffff) ---[ end Kernel panic - not syncing: Fatal exception ]---] Reported-by: Frederick Lawler Link: https://lore.kernel.org/netdev/be5b9271-7507-19c5-ded1-fa78f1980e69@cloudflare.com Signed-off-by: Shay Drory Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 2 +- .../ethernet/mellanox/mlx5/core/mlx5_irq.h | 1 + .../net/ethernet/mellanox/mlx5/core/pci_irq.c | 29 +++++++++++++++++++ 3 files changed, 31 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 1c35d721a31d..fe698c79616c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -1104,7 +1104,7 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev) struct mlx5_eq_table *table = dev->priv.eq_table; mutex_lock(&table->lock); /* sync with create/destroy_async_eq */ - mlx5_irq_table_destroy(dev); + mlx5_irq_table_free_irqs(dev); mutex_unlock(&table->lock); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h index efd0c299c5c7..aa403a5ea34e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h @@ -15,6 +15,7 @@ int mlx5_irq_table_init(struct mlx5_core_dev *dev); void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev); int mlx5_irq_table_create(struct mlx5_core_dev *dev); void mlx5_irq_table_destroy(struct mlx5_core_dev *dev); +void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev); int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table); int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table); struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index 2245d3b2f393..ac1304c2d205 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -691,6 +691,24 @@ static void irq_pools_destroy(struct mlx5_irq_table *table) irq_pool_free(table->pcif_pool); } +static void 
mlx5_irq_pool_free_irqs(struct mlx5_irq_pool *pool)
+{
+	struct mlx5_irq *irq;
+	unsigned long index;
+
+	xa_for_each(&pool->irqs, index, irq)
+		free_irq(irq->map.virq, &irq->nh);
+}
+
+static void mlx5_irq_pools_free_irqs(struct mlx5_irq_table *table)
+{
+	if (table->sf_ctrl_pool) {
+		mlx5_irq_pool_free_irqs(table->sf_comp_pool);
+		mlx5_irq_pool_free_irqs(table->sf_ctrl_pool);
+	}
+	mlx5_irq_pool_free_irqs(table->pcif_pool);
+}
+
 /* irq_table API */
 
 int mlx5_irq_table_init(struct mlx5_core_dev *dev)
@@ -774,6 +792,17 @@ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
 	pci_free_irq_vectors(dev->pdev);
 }
 
+void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev)
+{
+	struct mlx5_irq_table *table = dev->priv.irq_table;
+
+	if (mlx5_core_is_sf(dev))
+		return;
+
+	mlx5_irq_pools_free_irqs(table);
+	pci_free_irq_vectors(dev->pdev);
+}
+
 int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table)
 {
 	if (table->sf_comp_pool)

From ef8c063cf88e1a3d99ab4ada1cbab5ba7248a4f2 Mon Sep 17 00:00:00 2001
From: Shay Drory
Date: Sun, 16 Apr 2023 08:54:04 +0300
Subject: [PATCH 45/79] net/mlx5: Fix irq affinity management

The cited patch denies the user the ability to change the affinity of
mlx5 irqs, which breaks backward compatibility. Hence, allow the user
to change the affinity of mlx5 irqs.

Fixes: bbac70c74183 ("net/mlx5: Use newer affinity descriptor")
Signed-off-by: Shay Drory
Reviewed-by: Eli Cohen
Signed-off-by: Saeed Mahameed
---
 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index ac1304c2d205..86b528aae6d4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -567,7 +567,7 @@ int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs,
 	struct mlx5_irq *irq;
 	int i;
 
-	af_desc.is_managed = 1;
+	af_desc.is_managed = false;
 	for (i = 0; i < nirqs; i++) {
 		cpumask_set_cpu(cpus[i], &af_desc.mask);
 		irq = mlx5_irq_request(dev, i + 1, &af_desc, rmap);

From 1da438c0ae02396dc5018b63237492cb5908608d Mon Sep 17 00:00:00 2001
From: Shay Drory
Date: Mon, 17 Apr 2023 10:57:50 +0300
Subject: [PATCH 46/79] net/mlx5: Fix indexing of mlx5_irq

After the cited patch, the mlx5_irq xarray index can be different from
the mlx5_irq MSIX table index. Fix it by storing both the mlx5_irq
xarray index and the MSIX table index.

Fixes: 3354822cde5a ("net/mlx5: Use dynamic msix vectors allocation")
Signed-off-by: Shay Drory
Reviewed-by: Eli Cohen
Signed-off-by: Saeed Mahameed
---
 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 86b528aae6d4..db5687d9fec9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -32,6 +32,7 @@ struct mlx5_irq {
 	struct mlx5_irq_pool *pool;
 	int refcount;
 	struct msi_map map;
+	u32 pool_index;
 };
 
 struct mlx5_irq_table {
@@ -132,7 +133,7 @@ static void irq_release(struct mlx5_irq *irq)
 	struct cpu_rmap *rmap;
 #endif
 
-	xa_erase(&pool->irqs, irq->map.index);
+	xa_erase(&pool->irqs, irq->pool_index);
 	/* free_irq requires that affinity_hint and rmap will be cleared before
 	 * calling it.
	 * To satisfy this requirement, we call
 	 * irq_cpu_rmap_remove() to remove the notifier
@@ -276,11 +277,11 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
 	}
 	irq->pool = pool;
 	irq->refcount = 1;
-	irq->map.index = i;
-	err = xa_err(xa_store(&pool->irqs, irq->map.index, irq, GFP_KERNEL));
+	irq->pool_index = i;
+	err = xa_err(xa_store(&pool->irqs, irq->pool_index, irq, GFP_KERNEL));
 	if (err) {
 		mlx5_core_err(dev, "Failed to alloc xa entry for irq(%u). err = %d\n",
-			      irq->map.index, err);
+			      irq->pool_index, err);
 		goto err_xa;
 	}
 	return irq;

From 600761245952d7f70280add6ce02894f1528992b Mon Sep 17 00:00:00 2001
From: Horatiu Vultur
Date: Mon, 22 May 2023 14:00:38 +0200
Subject: [PATCH 47/79] lan966x: Fix unloading/loading of the driver

It was noticed that, after a while of unloading/loading the driver and
sending traffic through the switch, it would stop working. It would
stop forwarding any traffic and the only way to get out of this was to
do a power cycle of the board.

The root cause seems to be that the switch core is initialized twice.
Apparently, initializing the switch core twice disturbs the pointers in
the queue systems in the HW, so after a while it would stop sending the
traffic. Unfortunately, it is not possible to use a reset of the switch
here, because the reset line is connected to multiple devices like
MDIO, SGPIO, FAN, etc., so all those devices would get reset when the
network driver is loaded. So the fix is to check if the core is already
initialized and, if that is the case, not initialize it again.

Fixes: db8bcaad5393 ("net: lan966x: add the basic lan966x driver")
Signed-off-by: Horatiu Vultur
Reviewed-by: Simon Horman
Link: https://lore.kernel.org/r/20230522120038.3749026-1-horatiu.vultur@microchip.com
Signed-off-by: Paolo Abeni
---
 drivers/net/ethernet/microchip/lan966x/lan966x_main.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 2b6e046e1d10..ee2698698d71 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -1039,6 +1039,16 @@ static int lan966x_reset_switch(struct lan966x *lan966x)
 
 	reset_control_reset(switch_reset);
 
+	/* Don't reinitialize the switch core, if it is already initialized. In
+	 * case it is initialized twice, some pointers inside the queue system
+	 * in HW will get corrupted and then after a while the queue system gets
+	 * full and no traffic is passing through the switch. The issue is seen
+	 * when loading and unloading the driver and sending traffic through the
+	 * switch.
+	 */
+	if (lan_rd(lan966x, SYS_RESET_CFG) & SYS_RESET_CFG_CORE_ENA)
+		return 0;
+
 	lan_wr(SYS_RESET_CFG_CORE_ENA_SET(0), lan966x, SYS_RESET_CFG);
 	lan_wr(SYS_RAM_INIT_RAM_INIT_SET(1), lan966x, SYS_RAM_INIT);
 	ret = readx_poll_timeout(lan966x_ram_init, lan966x,

From 3632679d9e4f879f49949bb5b050e0de553e4739 Mon Sep 17 00:00:00 2001
From: Nicolas Dichtel
Date: Mon, 22 May 2023 14:08:20 +0200
Subject: [PATCH 48/79] ipv{4,6}/raw: fix output xfrm lookup wrt protocol

With a raw socket bound to IPPROTO_RAW (i.e. with hdrincl enabled), the
protocol field of the flow structure, built by raw_sendmsg() /
rawv6_sendmsg(), is set to IPPROTO_RAW. This breaks the ipsec policy
lookup when some policies are defined with a protocol in the selector.

For ipv6, the sin6_port field from 'struct sockaddr_in6' could be used
to specify the protocol.
Just accept all values for IPPROTO_RAW socket. For ipv4, the sin_port field of 'struct sockaddr_in' could not be used without breaking backward compatibility (the value of this field was never checked). Let's add a new kind of control message, so that the userland could specify which protocol is used. Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") CC: stable@vger.kernel.org Signed-off-by: Nicolas Dichtel Link: https://lore.kernel.org/r/20230522120820.1319391-1-nicolas.dichtel@6wind.com Signed-off-by: Paolo Abeni --- include/net/ip.h | 2 ++ include/uapi/linux/in.h | 1 + net/ipv4/ip_sockglue.c | 12 +++++++++++- net/ipv4/raw.c | 5 ++++- net/ipv6/raw.c | 3 ++- 5 files changed, 20 insertions(+), 3 deletions(-) diff --git a/include/net/ip.h b/include/net/ip.h index c3fffaa92d6e..acec504c469a 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -76,6 +76,7 @@ struct ipcm_cookie { __be32 addr; int oif; struct ip_options_rcu *opt; + __u8 protocol; __u8 ttl; __s16 tos; char priority; @@ -96,6 +97,7 @@ static inline void ipcm_init_sk(struct ipcm_cookie *ipcm, ipcm->sockc.tsflags = inet->sk.sk_tsflags; ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if); ipcm->addr = inet->inet_saddr; + ipcm->protocol = inet->inet_num; } #define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb)) diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h index 4b7f2df66b99..e682ab628dfa 100644 --- a/include/uapi/linux/in.h +++ b/include/uapi/linux/in.h @@ -163,6 +163,7 @@ struct in_addr { #define IP_MULTICAST_ALL 49 #define IP_UNICAST_IF 50 #define IP_LOCAL_PORT_RANGE 51 +#define IP_PROTOCOL 52 #define MCAST_EXCLUDE 0 #define MCAST_INCLUDE 1 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index b511ff0adc0a..8e97d8d4cc9d 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -317,7 +317,14 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc, ipc->tos = val; ipc->priority = rt_tos2priority(ipc->tos); break; - + case IP_PROTOCOL: + if (cmsg->cmsg_len != CMSG_LEN(sizeof(int))) + return -EINVAL; + val = *(int *)CMSG_DATA(cmsg); + if (val < 1 || val > 255) + return -EINVAL; + ipc->protocol = val; + break; default: return -EINVAL; } @@ -1761,6 +1768,9 @@ int do_ip_getsockopt(struct sock *sk, int level, int optname, case IP_LOCAL_PORT_RANGE: val = inet->local_port_range.hi << 16 | inet->local_port_range.lo; break; + case IP_PROTOCOL: + val = inet_sk(sk)->inet_num; + break; default: sockopt_release_sock(sk); return -ENOPROTOOPT; diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index ff712bf2a98d..eadf1c9ef7e4 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -532,6 +532,9 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) } ipcm_init_sk(&ipc, inet); + /* Keep backward compat */ + if (hdrincl) + ipc.protocol = IPPROTO_RAW; if (msg->msg_controllen) { err = ip_cmsg_send(sk, msg, &ipc, false); @@ -599,7 +602,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) flowi4_init_output(&fl4, ipc.oif, ipc.sockc.mark, tos, RT_SCOPE_UNIVERSE, - hdrincl ? IPPROTO_RAW : sk->sk_protocol, + hdrincl ? ipc.protocol : sk->sk_protocol, inet_sk_flowi_flags(sk) | (hdrincl ? 
FLOWI_FLAG_KNOWN_NH : 0),
			   daddr, saddr, 0, 0, sk->sk_uid);

diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 7d0adb612bdd..44ee7a2e72ac 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -793,7 +793,8 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 
 	if (!proto)
 		proto = inet->inet_num;
-	else if (proto != inet->inet_num)
+	else if (proto != inet->inet_num &&
+		 inet->inet_num != IPPROTO_RAW)
 		return -EINVAL;
 
 	if (proto > 255)

From 78fa0d61d97a728d306b0c23d353c0e340756437 Mon Sep 17 00:00:00 2001
From: John Fastabend
Date: Mon, 22 May 2023 19:56:05 -0700
Subject: [PATCH 49/79] bpf, sockmap: Pass skb ownership through read_skb

The read_skb hook calls consume_skb() now, but this means that if the
recv_actor program wants to use the skb it needs to inc the ref cnt so
that the consume_skb() doesn't kfree the sk_buff. This is problematic
because in some error cases under memory pressure we may need to
linearize the sk_buff from sk_psock_skb_ingress_enqueue(). Then we get
this,

  skb_linearize()
    __pskb_pull_tail()
      pskb_expand_head()
        BUG_ON(skb_shared(skb))

Because we incremented the users refcnt from sk_psock_verdict_recv() we
hit the BUG_ON() with refcnt > 1 and trip it.

To fix, let's simply pass ownership of the sk_buff through the skb_read
call. Then we can drop the consume from the read_skb handlers and
assume the verdict recv does any required kfree.

Bug found while testing in our CI which runs in VMs that hit memory
constraints rather regularly. William tested TCP read_skb handlers.

[ 106.536188] ------------[ cut here ]------------
[ 106.536197] kernel BUG at net/core/skbuff.c:1693!
[ 106.536479] invalid opcode: 0000 [#1] PREEMPT SMP PTI
[ 106.536726] CPU: 3 PID: 1495 Comm: curl Not tainted 5.19.0-rc5 #1
[ 106.537023] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS ArchLinux 1.16.0-1 04/01/2014
[ 106.537467] RIP: 0010:pskb_expand_head+0x269/0x330
[ 106.538585] RSP: 0018:ffffc90000138b68 EFLAGS: 00010202
[ 106.538839] RAX: 000000000000003f RBX: ffff8881048940e8 RCX: 0000000000000a20
[ 106.539186] RDX: 0000000000000002 RSI: 0000000000000000 RDI: ffff8881048940e8
[ 106.539529] RBP: ffffc90000138be8 R08: 00000000e161fd1a R09: 0000000000000000
[ 106.539877] R10: 0000000000000018 R11: 0000000000000000 R12: ffff8881048940e8
[ 106.540222] R13: 0000000000000003 R14: 0000000000000000 R15: ffff8881048940e8
[ 106.540568] FS: 00007f277dde9f00(0000) GS:ffff88813bd80000(0000) knlGS:0000000000000000
[ 106.540954] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 106.541227] CR2: 00007f277eeede64 CR3: 000000000ad3e000 CR4: 00000000000006e0
[ 106.541569] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[ 106.541915] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
[ 106.542255] Call Trace:
[ 106.542383] 
[ 106.542487] __pskb_pull_tail+0x4b/0x3e0
[ 106.542681] skb_ensure_writable+0x85/0xa0
[ 106.542882] sk_skb_pull_data+0x18/0x20
[ 106.543084] bpf_prog_b517a65a242018b0_bpf_skskb_http_verdict+0x3a9/0x4aa9
[ 106.543536] ? migrate_disable+0x66/0x80
[ 106.543871] sk_psock_verdict_recv+0xe2/0x310
[ 106.544258] ?
sk_psock_write_space+0x1f0/0x1f0 [ 106.544561] tcp_read_skb+0x7b/0x120 [ 106.544740] tcp_data_queue+0x904/0xee0 [ 106.544931] tcp_rcv_established+0x212/0x7c0 [ 106.545142] tcp_v4_do_rcv+0x174/0x2a0 [ 106.545326] tcp_v4_rcv+0xe70/0xf60 [ 106.545500] ip_protocol_deliver_rcu+0x48/0x290 [ 106.545744] ip_local_deliver_finish+0xa7/0x150 Fixes: 04919bed948dc ("tcp: Introduce tcp_read_skb()") Reported-by: William Findlay Signed-off-by: John Fastabend Signed-off-by: Daniel Borkmann Tested-by: William Findlay Reviewed-by: Jakub Sitnicki Link: https://lore.kernel.org/bpf/20230523025618.113937-2-john.fastabend@gmail.com --- net/core/skmsg.c | 2 -- net/ipv4/tcp.c | 1 - net/ipv4/udp.c | 7 ++----- net/unix/af_unix.c | 7 ++----- net/vmw_vsock/virtio_transport_common.c | 5 +---- 5 files changed, 5 insertions(+), 17 deletions(-) diff --git a/net/core/skmsg.c b/net/core/skmsg.c index f81883759d38..4a3dc8d27295 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -1183,8 +1183,6 @@ static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb) int ret = __SK_DROP; int len = skb->len; - skb_get(skb); - rcu_read_lock(); psock = sk_psock(sk); if (unlikely(!psock)) { diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 4d6392c16b7a..e914e3446377 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1773,7 +1773,6 @@ int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor) WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk)); tcp_flags = TCP_SKB_CB(skb)->tcp_flags; used = recv_actor(sk, skb); - consume_skb(skb); if (used < 0) { if (!copied) copied = used; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index aa32afd871ee..9482def1f310 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1818,7 +1818,7 @@ EXPORT_SYMBOL(__skb_recv_udp); int udp_read_skb(struct sock *sk, skb_read_actor_t recv_actor) { struct sk_buff *skb; - int err, copied; + int err; try_again: skb = skb_recv_udp(sk, MSG_DONTWAIT, &err); @@ -1837,10 +1837,7 @@ try_again: } WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk)); - copied = recv_actor(sk, skb); - kfree_skb(skb); - - return copied; + return recv_actor(sk, skb); } EXPORT_SYMBOL(udp_read_skb); diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index cc695c9f09ec..e7728b57a8c7 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -2553,7 +2553,7 @@ static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor) { struct unix_sock *u = unix_sk(sk); struct sk_buff *skb; - int err, copied; + int err; mutex_lock(&u->iolock); skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err); @@ -2561,10 +2561,7 @@ static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor) if (!skb) return err; - copied = recv_actor(sk, skb); - kfree_skb(skb); - - return copied; + return recv_actor(sk, skb); } /* diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c index e4878551f140..b769fc258931 100644 --- a/net/vmw_vsock/virtio_transport_common.c +++ b/net/vmw_vsock/virtio_transport_common.c @@ -1441,7 +1441,6 @@ int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t recv_acto struct sock *sk = sk_vsock(vsk); struct sk_buff *skb; int off = 0; - int copied; int err; spin_lock_bh(&vvs->rx_lock); @@ -1454,9 +1453,7 @@ int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t recv_acto if (!skb) return err; - copied = recv_actor(sk, skb); - kfree_skb(skb); - return copied; + return recv_actor(sk, skb); } EXPORT_SYMBOL_GPL(virtio_transport_read_skb); From 29173d07f79883ac94f5570294f98af3d4287382 Mon Sep 17 00:00:00 2001 From: John 
Fastabend
Date: Mon, 22 May 2023 19:56:06 -0700
Subject: [PATCH 50/79] bpf, sockmap: Convert schedule_work into delayed_work

Sk_buffs are fed into sockmap verdict programs either from a strparser
(when the user might want to decide how framing of the skb is done by
attaching another parser program) or directly through tcp_read_sock.
The tcp_read_sock is the preferred method for performance when the BPF
logic is a stream parser.

The flow for Cilium's common use case with a stream parser is,

  tcp_read_sock()
    sk_psock_verdict_recv
      ret = bpf_prog_run_pin_on_cpu()
      sk_psock_verdict_apply(sock, skb, ret)
        // if system is under memory pressure or app is slow we may
        // need to queue skb. Do this queuing through ingress_skb and
        // then kick timer to wake up handler
        skb_queue_tail(ingress_skb, skb)
        schedule_work(work);

The work queue is wired up to sk_psock_backlog(). This will then walk
the ingress_skb skb list that holds our sk_buffs that could not be
handled, but should be OK to run at some later point. However, it's
possible that the workqueue doing this work still hits an error when
sending the skb. When this happens the skbuff is requeued on a
temporary 'state' struct kept with the workqueue. This is necessary
because it's possible to partially send an skbuff before hitting an
error and we need to know how and where to restart when the workqueue
runs next.

Now for the trouble: we don't rekick the workqueue. This can cause a
stall where the skbuff we just cached on the state variable might never
be sent. This happens when it's the last packet in a flow and no
further packets come along that would cause the system to kick the
workqueue from that side.

To fix we could do a simple schedule_work(), but while under memory
pressure it makes sense to back off some instead of continuing to retry
repeatedly. So instead, to fix, convert schedule_work to
schedule_delayed_work and add backoff logic to reschedule from the
backlog queue on errors. It's not obvious though what a good backoff
is, so use '1'.

To test, we observed some flakes while running the NGINX compliance
test with sockmap; we attributed these failed tests to this bug and the
subsequent issue.

From on-list discussion: commit bec217197b41 ("skmsg: Schedule psock
work if the cached skb exists on the psock") was intended to address a
similar race, but had a couple of cases it missed. Most obviously, it
only accounted for receiving traffic on the local socket, so if
redirecting into another socket we could still get an sk_buff stuck
here. Next, it missed the case where copied=0 in the recv() handler and
then we wouldn't kick the scheduler. Also, it's sub-optimal to require
userspace to kick the internal mechanisms of sockmap to wake it up and
copy data to user. It results in an extra syscall and requires the app
to actually handle the EAGAIN correctly.
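The resulting backlog error path can be sketched like this
(illustrative only; a delay of 1 jiffy is the backoff chosen here, and
the names are taken from the diff below):

	ret = sk_psock_handle_skb(psock, skb, off, len, ingress);
	if (ret == -EAGAIN) {
		sk_psock_skb_state(psock, state, skb, len, off);
		/* transient failure: requeue ourselves with a small
		 * backoff instead of waiting for user space to kick
		 * the backlog via recvmsg()
		 */
		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			schedule_delayed_work(&psock->work, 1);
		goto end;
	}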
Fixes: 04919bed948dc ("tcp: Introduce tcp_read_skb()") Signed-off-by: John Fastabend Signed-off-by: Daniel Borkmann Tested-by: William Findlay Reviewed-by: Jakub Sitnicki Link: https://lore.kernel.org/bpf/20230523025618.113937-3-john.fastabend@gmail.com --- include/linux/skmsg.h | 2 +- net/core/skmsg.c | 21 ++++++++++++++------- net/core/sock_map.c | 3 ++- 3 files changed, 17 insertions(+), 9 deletions(-) diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index 84f787416a54..904ff9a32ad6 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -105,7 +105,7 @@ struct sk_psock { struct proto *sk_proto; struct mutex work_mutex; struct sk_psock_work_state work_state; - struct work_struct work; + struct delayed_work work; struct rcu_work rwork; }; diff --git a/net/core/skmsg.c b/net/core/skmsg.c index 4a3dc8d27295..0a9ee2acac0b 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -482,7 +482,7 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg, } out: if (psock->work_state.skb && copied > 0) - schedule_work(&psock->work); + schedule_delayed_work(&psock->work, 0); return copied; } EXPORT_SYMBOL_GPL(sk_msg_recvmsg); @@ -640,7 +640,8 @@ static void sk_psock_skb_state(struct sk_psock *psock, static void sk_psock_backlog(struct work_struct *work) { - struct sk_psock *psock = container_of(work, struct sk_psock, work); + struct delayed_work *dwork = to_delayed_work(work); + struct sk_psock *psock = container_of(dwork, struct sk_psock, work); struct sk_psock_work_state *state = &psock->work_state; struct sk_buff *skb = NULL; bool ingress; @@ -680,6 +681,12 @@ start: if (ret == -EAGAIN) { sk_psock_skb_state(psock, state, skb, len, off); + + /* Delay slightly to prioritize any + * other work that might be here. + */ + if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) + schedule_delayed_work(&psock->work, 1); goto end; } /* Hard errors break pipe and stop xmit. 
*/
@@ -734,7 +741,7 @@ struct sk_psock *sk_psock_init(struct sock *sk, int node)
 	INIT_LIST_HEAD(&psock->link);
 	spin_lock_init(&psock->link_lock);
 
-	INIT_WORK(&psock->work, sk_psock_backlog);
+	INIT_DELAYED_WORK(&psock->work, sk_psock_backlog);
 	mutex_init(&psock->work_mutex);
 	INIT_LIST_HEAD(&psock->ingress_msg);
 	spin_lock_init(&psock->ingress_lock);
@@ -823,7 +830,7 @@ static void sk_psock_destroy(struct work_struct *work)
 
 	sk_psock_done_strp(psock);
 
-	cancel_work_sync(&psock->work);
+	cancel_delayed_work_sync(&psock->work);
 	mutex_destroy(&psock->work_mutex);
 
 	psock_progs_drop(&psock->progs);
@@ -938,7 +945,7 @@ static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
 	}
 
 	skb_queue_tail(&psock_other->ingress_skb, skb);
-	schedule_work(&psock_other->work);
+	schedule_delayed_work(&psock_other->work, 0);
 	spin_unlock_bh(&psock_other->ingress_lock);
 	return 0;
 }
@@ -1018,7 +1025,7 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
 		spin_lock_bh(&psock->ingress_lock);
 		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
 			skb_queue_tail(&psock->ingress_skb, skb);
-			schedule_work(&psock->work);
+			schedule_delayed_work(&psock->work, 0);
 			err = 0;
 		}
 		spin_unlock_bh(&psock->ingress_lock);
@@ -1049,7 +1056,7 @@ static void sk_psock_write_space(struct sock *sk)
 	psock = sk_psock(sk);
 	if (likely(psock)) {
 		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
-			schedule_work(&psock->work);
+			schedule_delayed_work(&psock->work, 0);
 		write_space = psock->saved_write_space;
 	}
 	rcu_read_unlock();

diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 7c189c2e2fbf..00afb66cd095 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -1644,9 +1644,10 @@ void sock_map_close(struct sock *sk, long timeout)
 		rcu_read_unlock();
 		sk_psock_stop(psock);
 		release_sock(sk);
-		cancel_work_sync(&psock->work);
+		cancel_delayed_work_sync(&psock->work);
 		sk_psock_put(sk, psock);
 	}
+
 	/* Make sure we do not recurse. This is a bug.
 	 * Leak the socket instead of crashing on a stack overflow.
 	 */

From bce22552f92ea7c577f49839b8e8f7d29afaf880 Mon Sep 17 00:00:00 2001
From: John Fastabend
Date: Mon, 22 May 2023 19:56:07 -0700
Subject: [PATCH 51/79] bpf, sockmap: Reschedule is now done through backlog

Now that the backlog manages the reschedule() logic correctly we can
drop the partial fix to reschedule from the recvmsg hook.

Rescheduling on the recvmsg hook was added to address a corner case
where we still had data in the backlog state but had nothing to kick it
and reschedule the backlog worker to run and finish copying data out of
the state. This had a couple of limitations: first, it required user
space to kick it, introducing an unnecessary EBUSY and retry. Second,
it only handled the ingress case; egress redirects would still be hung.

With the correct fix, pushing the reschedule logic down to where the
ENOMEM error occurs, we can drop this fix.
Fixes: bec217197b412 ("skmsg: Schedule psock work if the cached skb exists on the psock")
Signed-off-by: John Fastabend
Signed-off-by: Daniel Borkmann
Reviewed-by: Jakub Sitnicki
Link: https://lore.kernel.org/bpf/20230523025618.113937-4-john.fastabend@gmail.com
---
 net/core/skmsg.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 0a9ee2acac0b..76ff15f8bb06 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -481,8 +481,6 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
 		msg_rx = sk_psock_peek_msg(psock);
 	}
 out:
-	if (psock->work_state.skb && copied > 0)
-		schedule_delayed_work(&psock->work, 0);
 	return copied;
 }
 EXPORT_SYMBOL_GPL(sk_msg_recvmsg);

From 405df89dd52cbcd69a3cd7d9a10d64de38f854b2 Mon Sep 17 00:00:00 2001
From: John Fastabend
Date: Mon, 22 May 2023 19:56:08 -0700
Subject: [PATCH 52/79] bpf, sockmap: Improved check for empty queue

We noticed some rare sk_buffs were stepping past the queue when the
system was under memory pressure. The general theory is to skip
enqueueing sk_buffs when it's not necessary, which is the normal case
with a system that is properly provisioned for the task: no memory
pressure and enough cpu assigned.

But, if we can't allocate memory due to an ENOMEM error when enqueueing
the sk_buff into the sockmap receive queue, we push it onto a delayed
workqueue to retry later. When a new sk_buff is received we then check
if that queue is empty. However, there is a problem with simply
checking the queue length. When a sk_buff is being processed from the
ingress queue but is not yet on the sockmap msg receive queue, it's
possible to also receive a sk_buff through the normal path. It will
check the ingress queue, which is zero, and then skip ahead of the pkt
being processed.

Previously we used the sock lock from both contexts, which made the
problem harder to hit, but not impossible.

To fix, instead of popping the skb from the queue entirely, we peek the
skb from the queue and do the copy there. This ensures the queue length
check stays non-zero while the skb is being processed. Then finally,
when the entire skb has been copied to the user space queue or another
socket, we pop it off the queue. This way the queue length check allows
bypassing the queue only after the list has been completely processed.

To reproduce the issue we ran the NGINX compliance test with sockmap
and observed some flakes in our testing that we attributed to this
issue.
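The reworked loop follows a peek-then-dequeue pattern, sketched here
(illustrative only; handle() is a stand-in for the real send/ingress
handling in the diff below):

	while ((skb = skb_peek(&psock->ingress_skb))) {
		/* the skb stays queued while it is processed, so a
		 * concurrent queue-empty check cannot race ahead of it
		 */
		if (handle(psock, skb) == -EAGAIN)
			break;		/* leave skb queued, retry later */
		skb = skb_dequeue(&psock->ingress_skb);	/* fully consumed */
		if (!ingress)
			kfree_skb(skb);
	}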
Fixes: 04919bed948dc ("tcp: Introduce tcp_read_skb()") Suggested-by: Jakub Sitnicki Signed-off-by: John Fastabend Signed-off-by: Daniel Borkmann Tested-by: William Findlay Reviewed-by: Jakub Sitnicki Link: https://lore.kernel.org/bpf/20230523025618.113937-5-john.fastabend@gmail.com --- include/linux/skmsg.h | 1 - net/core/skmsg.c | 32 ++++++++------------------------ 2 files changed, 8 insertions(+), 25 deletions(-) diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index 904ff9a32ad6..054d7911bfc9 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -71,7 +71,6 @@ struct sk_psock_link { }; struct sk_psock_work_state { - struct sk_buff *skb; u32 len; u32 off; }; diff --git a/net/core/skmsg.c b/net/core/skmsg.c index 76ff15f8bb06..bcd45a99a3db 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -622,16 +622,12 @@ static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb, static void sk_psock_skb_state(struct sk_psock *psock, struct sk_psock_work_state *state, - struct sk_buff *skb, int len, int off) { spin_lock_bh(&psock->ingress_lock); if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) { - state->skb = skb; state->len = len; state->off = off; - } else { - sock_drop(psock->sk, skb); } spin_unlock_bh(&psock->ingress_lock); } @@ -642,23 +638,17 @@ static void sk_psock_backlog(struct work_struct *work) struct sk_psock *psock = container_of(dwork, struct sk_psock, work); struct sk_psock_work_state *state = &psock->work_state; struct sk_buff *skb = NULL; + u32 len = 0, off = 0; bool ingress; - u32 len, off; int ret; mutex_lock(&psock->work_mutex); - if (unlikely(state->skb)) { - spin_lock_bh(&psock->ingress_lock); - skb = state->skb; + if (unlikely(state->len)) { len = state->len; off = state->off; - state->skb = NULL; - spin_unlock_bh(&psock->ingress_lock); } - if (skb) - goto start; - while ((skb = skb_dequeue(&psock->ingress_skb))) { + while ((skb = skb_peek(&psock->ingress_skb))) { len = skb->len; off = 0; if (skb_bpf_strparser(skb)) { @@ -667,7 +657,6 @@ static void sk_psock_backlog(struct work_struct *work) off = stm->offset; len = stm->full_len; } -start: ingress = skb_bpf_ingress(skb); skb_bpf_redirect_clear(skb); do { @@ -677,8 +666,7 @@ start: len, ingress); if (ret <= 0) { if (ret == -EAGAIN) { - sk_psock_skb_state(psock, state, skb, - len, off); + sk_psock_skb_state(psock, state, len, off); /* Delay slightly to prioritize any * other work that might be here. @@ -690,15 +678,16 @@ start: /* Hard errors break pipe and stop xmit. */ sk_psock_report_error(psock, ret ? -ret : EPIPE); sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED); - sock_drop(psock->sk, skb); goto end; } off += ret; len -= ret; } while (len); - if (!ingress) + skb = skb_dequeue(&psock->ingress_skb); + if (!ingress) { kfree_skb(skb); + } } end: mutex_unlock(&psock->work_mutex); @@ -791,11 +780,6 @@ static void __sk_psock_zap_ingress(struct sk_psock *psock) skb_bpf_redirect_clear(skb); sock_drop(psock->sk, skb); } - kfree_skb(psock->work_state.skb); - /* We null the skb here to ensure that calls to sk_psock_backlog - * do not pick up the free'd skb. 
- */ - psock->work_state.skb = NULL; __sk_psock_purge_ingress_msg(psock); } @@ -814,7 +798,6 @@ void sk_psock_stop(struct sk_psock *psock) spin_lock_bh(&psock->ingress_lock); sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED); sk_psock_cork_free(psock); - __sk_psock_zap_ingress(psock); spin_unlock_bh(&psock->ingress_lock); } @@ -829,6 +812,7 @@ static void sk_psock_destroy(struct work_struct *work) sk_psock_done_strp(psock); cancel_delayed_work_sync(&psock->work); + __sk_psock_zap_ingress(psock); mutex_destroy(&psock->work_mutex); psock_progs_drop(&psock->progs); From 901546fd8f9ca4b5c481ce00928ab425ce9aacc0 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Mon, 22 May 2023 19:56:09 -0700 Subject: [PATCH 53/79] bpf, sockmap: Handle fin correctly The sockmap code is returning EAGAIN after a FIN packet is received and no more data is on the receive queue. Correct behavior is to return 0 to the user and the user can then close the socket. The EAGAIN causes many apps to retry which masks the problem. Eventually the socket is evicted from the sockmap because its released from sockmap sock free handling. The issue creates a delay and can cause some errors on application side. To fix this check on sk_msg_recvmsg side if length is zero and FIN flag is set then set return to zero. A selftest will be added to check this condition. Fixes: 04919bed948dc ("tcp: Introduce tcp_read_skb()") Signed-off-by: John Fastabend Signed-off-by: Daniel Borkmann Tested-by: William Findlay Reviewed-by: Jakub Sitnicki Link: https://lore.kernel.org/bpf/20230523025618.113937-6-john.fastabend@gmail.com --- net/ipv4/tcp_bpf.c | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index 2e9547467edb..73c13642d47f 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -174,6 +174,24 @@ static int tcp_msg_wait_data(struct sock *sk, struct sk_psock *psock, return ret; } +static bool is_next_msg_fin(struct sk_psock *psock) +{ + struct scatterlist *sge; + struct sk_msg *msg_rx; + int i; + + msg_rx = sk_psock_peek_msg(psock); + i = msg_rx->sg.start; + sge = sk_msg_elem(msg_rx, i); + if (!sge->length) { + struct sk_buff *skb = msg_rx->skb; + + if (skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) + return true; + } + return false; +} + static int tcp_bpf_recvmsg_parser(struct sock *sk, struct msghdr *msg, size_t len, @@ -196,6 +214,19 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk, lock_sock(sk); msg_bytes_ready: copied = sk_msg_recvmsg(sk, psock, msg, len, flags); + /* The typical case for EFAULT is the socket was gracefully + * shutdown with a FIN pkt. So check here the other case is + * some error on copy_page_to_iter which would be unexpected. + * On fin return correct return code to zero. + */ + if (copied == -EFAULT) { + bool is_fin = is_next_msg_fin(psock); + + if (is_fin) { + copied = 0; + goto out; + } + } if (!copied) { long timeo; int data; From ea444185a6bf7da4dd0df1598ee953e4f7174858 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Mon, 22 May 2023 19:56:10 -0700 Subject: [PATCH 54/79] bpf, sockmap: TCP data stall on recv before accept A common mechanism to put a TCP socket into the sockmap is to hook the BPF_SOCK_OPS_{ACTIVE_PASSIVE}_ESTABLISHED_CB event with a BPF program that can map the socket info to the correct BPF verdict parser. When the user adds the socket to the map the psock is created and the new ops are assigned to ensure the verdict program will 'see' the sk_buffs as they arrive. 
Part of this process hooks the sk_data_ready op with a BPF-specific handler to wake up the BPF verdict program when data is ready to read. The logic is simple enough (posted here for easy reading) static void sk_psock_verdict_data_ready(struct sock *sk) { struct socket *sock = sk->sk_socket; if (unlikely(!sock || !sock->ops || !sock->ops->read_skb)) return; sock->ops->read_skb(sk, sk_psock_verdict_recv); } The oversight here is sk->sk_socket is not assigned until the application accepts() the new socket. However, it's entirely OK for the peer application to do a connect() followed immediately by sends. The socket on the receiver is sitting on the backlog queue of the listening socket until it's accepted and the data is queued up. If the peer never accepts the socket or is slow, it will eventually hit data limits and rate limit the session. But, importantly for BPF sockmap hooks, when this data is received the TCP stack does the sk_data_ready() call, but the read_skb() for this data is never called because sk_socket is missing. The data sits on the sk_receive_queue. Then, once the socket is accepted, if we never receive more data from the peer there will be no further sk_data_ready calls and all the data is still on the sk_receive_queue(). Then the user calls recvmsg after accept(), and for TCP sockets in sockmap we use the tcp_bpf_recvmsg_parser() handler. The handler checks for data in the sk_msg ingress queue, expecting that the BPF program has already run from the sk_data_ready hook and enqueued the data as needed. So we are stuck. To fix this, do an unlikely check in the recvmsg handler for data on the sk_receive_queue and, if it exists, wake up data_ready. We have the sock locked in both read_skb and recvmsg, so we should avoid having multiple runners. Fixes: 04919bed948dc ("tcp: Introduce tcp_read_skb()") Signed-off-by: John Fastabend Signed-off-by: Daniel Borkmann Reviewed-by: Jakub Sitnicki Link: https://lore.kernel.org/bpf/20230523025618.113937-7-john.fastabend@gmail.com --- net/ipv4/tcp_bpf.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index 73c13642d47f..01dd76be1a58 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -212,6 +212,26 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk, return tcp_recvmsg(sk, msg, len, flags, addr_len); lock_sock(sk); + + /* We may have received data on the sk_receive_queue pre-accept and + * then we can not use read_skb in this context because we haven't + * assigned a sk_socket yet so have no link to the ops. The work-around + * is to check the sk_receive_queue and in these cases read skbs off + * queue again. The read_skb hook is not running at this point because + * of lock_sock so we avoid having multiple runners in read_skb. + */ + if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) { + tcp_data_ready(sk); + /* This handles the ENOMEM errors if we both receive data + * pre accept and are already under memory pressure. At least + * let user know to retry. + */ + if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) { + copied = -EAGAIN; + goto out; + } + } + msg_bytes_ready: copied = sk_msg_recvmsg(sk, psock, msg, len, flags); /* The typical case for EFAULT is the socket was gracefully From 6df7f764cd3cf5a03a4a47b23be47e57e41fcd85 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Mon, 22 May 2023 19:56:11 -0700 Subject: [PATCH 55/79] bpf, sockmap: Wake up polling after data copy When the TCP stack has data ready to read, sk_data_ready() is called.
Sockmap overwrites this with its own handler to call into the BPF verdict program. But the original TCP socket had sock_def_readable, which would additionally wake up any user space waiters with sk_wake_async(). Sockmap saved the callback when the socket was created, so call the saved data_ready callback and then we can wake up any epoll() logic waiting on the read. Note we gate this on 'copied >= 0' to account for returning 0 when a FIN is received, because we need to wake up the user for this as well so they can do the recvmsg() -> 0 and detect the shutdown. Fixes: 04919bed948dc ("tcp: Introduce tcp_read_skb()") Signed-off-by: John Fastabend Signed-off-by: Daniel Borkmann Reviewed-by: Jakub Sitnicki Link: https://lore.kernel.org/bpf/20230523025618.113937-8-john.fastabend@gmail.com --- net/core/skmsg.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/net/core/skmsg.c b/net/core/skmsg.c index bcd45a99a3db..08be5f409fb8 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -1199,12 +1199,21 @@ out: static void sk_psock_verdict_data_ready(struct sock *sk) { struct socket *sock = sk->sk_socket; + int copied; trace_sk_data_ready(sk); if (unlikely(!sock || !sock->ops || !sock->ops->read_skb)) return; - sock->ops->read_skb(sk, sk_psock_verdict_recv); + copied = sock->ops->read_skb(sk, sk_psock_verdict_recv); + if (copied >= 0) { + struct sk_psock *psock; + + rcu_read_lock(); + psock = sk_psock(sk); + psock->saved_data_ready(sk); + rcu_read_unlock(); + } } void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock) From e5c6de5fa025882babf89cecbed80acf49b987fa Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Mon, 22 May 2023 19:56:12 -0700 Subject: [PATCH 56/79] bpf, sockmap: Incorrectly handling copied_seq The read_skb() logic is incrementing tcp->copied_seq, which is used for, among other things, calculating how many outstanding bytes can be read by the application. This results in application errors: if the application does an ioctl(FIONREAD), we return zero because this is calculated from the copied_seq value. To fix this, we move tcp->copied_seq accounting into the recv handler so that we update these when the recvmsg() hook is called and data is in fact copied into user buffers. This gives an accurate FIONREAD value as expected and improves ACK handling. Before, we were calling tcp_rcv_space_adjust(), which would update the 'number of bytes copied to user in last RTT', which is wrong for programs returning SK_PASS. The bytes are only copied to the user when recvmsg is handled. Doing the fix for recvmsg is straightforward, but fixing redirect and SK_DROP pkts is a bit trickier. Build a tcp_eat_skb() helper and then call this from the skmsg handlers. This fixes another issue where a broken socket with a BPF program doing a resubmit could hang the receiver. This happened because, although read_skb() consumed the skb through sock_drop(), it did not update the copied_seq. Now if a single recv socket is redirecting to many sockets (for example for lb), the receiver sk will be hung even though we might expect it to continue. The hang comes from not updating the copied_seq numbers and the memory pressure resulting from that. We have a slight layer problem of calling tcp_eat_skb even if it's not a TCP socket. To fix we could refactor and create per-type receiver handlers. I decided this is more work than we want in the fix, and we already have some small tweaks depending on the caller that use the helper skb_bpf_strparser().
So we extend that a bit and always set the strparser bit when it is in use and then we can gate the seq_copied updates on this. Fixes: 04919bed948dc ("tcp: Introduce tcp_read_skb()") Signed-off-by: John Fastabend Signed-off-by: Daniel Borkmann Reviewed-by: Jakub Sitnicki Link: https://lore.kernel.org/bpf/20230523025618.113937-9-john.fastabend@gmail.com --- include/net/tcp.h | 10 ++++++++++ net/core/skmsg.c | 15 +++++++-------- net/ipv4/tcp.c | 10 +--------- net/ipv4/tcp_bpf.c | 28 +++++++++++++++++++++++++++- 4 files changed, 45 insertions(+), 18 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index 04a31643cda3..18a038d16434 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1470,6 +1470,8 @@ static inline void tcp_adjust_rcv_ssthresh(struct sock *sk) } void tcp_cleanup_rbuf(struct sock *sk, int copied); +void __tcp_cleanup_rbuf(struct sock *sk, int copied); + /* We provision sk_rcvbuf around 200% of sk_rcvlowat. * If 87.5 % (7/8) of the space has been consumed, we want to override @@ -2326,6 +2328,14 @@ int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore); void tcp_bpf_clone(const struct sock *sk, struct sock *newsk); #endif /* CONFIG_BPF_SYSCALL */ +#ifdef CONFIG_INET +void tcp_eat_skb(struct sock *sk, struct sk_buff *skb); +#else +static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb) +{ +} +#endif + int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress, struct sk_msg *msg, u32 bytes, int flags); #endif /* CONFIG_NET_SOCK_MSG */ diff --git a/net/core/skmsg.c b/net/core/skmsg.c index 08be5f409fb8..a9060e1f0e43 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -979,10 +979,8 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb, err = -EIO; sk_other = psock->sk; if (sock_flag(sk_other, SOCK_DEAD) || - !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) { - skb_bpf_redirect_clear(skb); + !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) goto out_free; - } skb_bpf_set_ingress(skb); @@ -1011,18 +1009,19 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb, err = 0; } spin_unlock_bh(&psock->ingress_lock); - if (err < 0) { - skb_bpf_redirect_clear(skb); + if (err < 0) goto out_free; - } } break; case __SK_REDIRECT: + tcp_eat_skb(psock->sk, skb); err = sk_psock_skb_redirect(psock, skb); break; case __SK_DROP: default: out_free: + skb_bpf_redirect_clear(skb); + tcp_eat_skb(psock->sk, skb); sock_drop(psock->sk, skb); } @@ -1067,8 +1066,7 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb) skb_dst_drop(skb); skb_bpf_redirect_clear(skb); ret = bpf_prog_run_pin_on_cpu(prog, skb); - if (ret == SK_PASS) - skb_bpf_set_strparser(skb); + skb_bpf_set_strparser(skb); ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb)); skb->sk = NULL; } @@ -1176,6 +1174,7 @@ static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb) psock = sk_psock(sk); if (unlikely(!psock)) { len = 0; + tcp_eat_skb(sk, skb); sock_drop(sk, skb); goto out; } diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index e914e3446377..a60f6f4e7cd9 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1571,7 +1571,7 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len) * calculation of whether or not we must ACK for the sake of * a window update. 
*/ -static void __tcp_cleanup_rbuf(struct sock *sk, int copied) +void __tcp_cleanup_rbuf(struct sock *sk, int copied) { struct tcp_sock *tp = tcp_sk(sk); bool time_to_ack = false; @@ -1786,14 +1786,6 @@ int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor) break; } } - WRITE_ONCE(tp->copied_seq, seq); - - tcp_rcv_space_adjust(sk); - - /* Clean up data we have read: This will do ACK frames. */ - if (copied > 0) - __tcp_cleanup_rbuf(sk, copied); - return copied; } EXPORT_SYMBOL(tcp_read_skb); diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index 01dd76be1a58..5f93918c063c 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -11,6 +11,24 @@ #include #include +void tcp_eat_skb(struct sock *sk, struct sk_buff *skb) +{ + struct tcp_sock *tcp; + int copied; + + if (!skb || !skb->len || !sk_is_tcp(sk)) + return; + + if (skb_bpf_strparser(skb)) + return; + + tcp = tcp_sk(sk); + copied = tcp->copied_seq + skb->len; + WRITE_ONCE(tcp->copied_seq, copied); + tcp_rcv_space_adjust(sk); + __tcp_cleanup_rbuf(sk, skb->len); +} + static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock, struct sk_msg *msg, u32 apply_bytes, int flags) { @@ -198,8 +216,10 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk, int flags, int *addr_len) { + struct tcp_sock *tcp = tcp_sk(sk); + u32 seq = tcp->copied_seq; struct sk_psock *psock; - int copied; + int copied = 0; if (unlikely(flags & MSG_ERRQUEUE)) return inet_recv_error(sk, msg, len, addr_len); @@ -244,9 +264,11 @@ msg_bytes_ready: if (is_fin) { copied = 0; + seq++; goto out; } } + seq += copied; if (!copied) { long timeo; int data; @@ -284,6 +306,10 @@ msg_bytes_ready: copied = -EAGAIN; } out: + WRITE_ONCE(tcp->copied_seq, seq); + tcp_rcv_space_adjust(sk); + if (copied > 0) + __tcp_cleanup_rbuf(sk, copied); release_sock(sk); sk_psock_put(sk, psock); return copied; From 4e02588d9a95b21f8d25d66cb6c24f9df1532f6b Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Mon, 22 May 2023 19:56:13 -0700 Subject: [PATCH 57/79] bpf, sockmap: Pull socket helpers out of listen test for general use No functional change here we merely pull the helpers in sockmap_listen.c into a header file so we can use these in other programs. The tests we are about to add aren't really _listen tests so doesn't make sense to add them here. Signed-off-by: John Fastabend Signed-off-by: Daniel Borkmann Reviewed-by: Jakub Sitnicki Link: https://lore.kernel.org/bpf/20230523025618.113937-10-john.fastabend@gmail.com --- .../bpf/prog_tests/sockmap_helpers.h | 272 ++++++++++++++++++ .../selftests/bpf/prog_tests/sockmap_listen.c | 263 +---------------- 2 files changed, 273 insertions(+), 262 deletions(-) create mode 100644 tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h b/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h new file mode 100644 index 000000000000..5aa99e6adcb4 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h @@ -0,0 +1,272 @@ +#ifndef __SOCKMAP_HELPERS__ +#define __SOCKMAP_HELPERS__ + +#include + +#define IO_TIMEOUT_SEC 30 +#define MAX_STRERR_LEN 256 +#define MAX_TEST_NAME 80 + +/* workaround for older vm_sockets.h */ +#ifndef VMADDR_CID_LOCAL +#define VMADDR_CID_LOCAL 1 +#endif + +#define __always_unused __attribute__((__unused__)) + +#define _FAIL(errnum, fmt...) \ + ({ \ + error_at_line(0, (errnum), __func__, __LINE__, fmt); \ + CHECK_FAIL(true); \ + }) +#define FAIL(fmt...) _FAIL(0, fmt) +#define FAIL_ERRNO(fmt...) 
_FAIL(errno, fmt) +#define FAIL_LIBBPF(err, msg) \ + ({ \ + char __buf[MAX_STRERR_LEN]; \ + libbpf_strerror((err), __buf, sizeof(__buf)); \ + FAIL("%s: %s", (msg), __buf); \ + }) + +/* Wrappers that fail the test on error and report it. */ + +#define xaccept_nonblock(fd, addr, len) \ + ({ \ + int __ret = \ + accept_timeout((fd), (addr), (len), IO_TIMEOUT_SEC); \ + if (__ret == -1) \ + FAIL_ERRNO("accept"); \ + __ret; \ + }) + +#define xbind(fd, addr, len) \ + ({ \ + int __ret = bind((fd), (addr), (len)); \ + if (__ret == -1) \ + FAIL_ERRNO("bind"); \ + __ret; \ + }) + +#define xclose(fd) \ + ({ \ + int __ret = close((fd)); \ + if (__ret == -1) \ + FAIL_ERRNO("close"); \ + __ret; \ + }) + +#define xconnect(fd, addr, len) \ + ({ \ + int __ret = connect((fd), (addr), (len)); \ + if (__ret == -1) \ + FAIL_ERRNO("connect"); \ + __ret; \ + }) + +#define xgetsockname(fd, addr, len) \ + ({ \ + int __ret = getsockname((fd), (addr), (len)); \ + if (__ret == -1) \ + FAIL_ERRNO("getsockname"); \ + __ret; \ + }) + +#define xgetsockopt(fd, level, name, val, len) \ + ({ \ + int __ret = getsockopt((fd), (level), (name), (val), (len)); \ + if (__ret == -1) \ + FAIL_ERRNO("getsockopt(" #name ")"); \ + __ret; \ + }) + +#define xlisten(fd, backlog) \ + ({ \ + int __ret = listen((fd), (backlog)); \ + if (__ret == -1) \ + FAIL_ERRNO("listen"); \ + __ret; \ + }) + +#define xsetsockopt(fd, level, name, val, len) \ + ({ \ + int __ret = setsockopt((fd), (level), (name), (val), (len)); \ + if (__ret == -1) \ + FAIL_ERRNO("setsockopt(" #name ")"); \ + __ret; \ + }) + +#define xsend(fd, buf, len, flags) \ + ({ \ + ssize_t __ret = send((fd), (buf), (len), (flags)); \ + if (__ret == -1) \ + FAIL_ERRNO("send"); \ + __ret; \ + }) + +#define xrecv_nonblock(fd, buf, len, flags) \ + ({ \ + ssize_t __ret = recv_timeout((fd), (buf), (len), (flags), \ + IO_TIMEOUT_SEC); \ + if (__ret == -1) \ + FAIL_ERRNO("recv"); \ + __ret; \ + }) + +#define xsocket(family, sotype, flags) \ + ({ \ + int __ret = socket(family, sotype, flags); \ + if (__ret == -1) \ + FAIL_ERRNO("socket"); \ + __ret; \ + }) + +#define xbpf_map_delete_elem(fd, key) \ + ({ \ + int __ret = bpf_map_delete_elem((fd), (key)); \ + if (__ret < 0) \ + FAIL_ERRNO("map_delete"); \ + __ret; \ + }) + +#define xbpf_map_lookup_elem(fd, key, val) \ + ({ \ + int __ret = bpf_map_lookup_elem((fd), (key), (val)); \ + if (__ret < 0) \ + FAIL_ERRNO("map_lookup"); \ + __ret; \ + }) + +#define xbpf_map_update_elem(fd, key, val, flags) \ + ({ \ + int __ret = bpf_map_update_elem((fd), (key), (val), (flags)); \ + if (__ret < 0) \ + FAIL_ERRNO("map_update"); \ + __ret; \ + }) + +#define xbpf_prog_attach(prog, target, type, flags) \ + ({ \ + int __ret = \ + bpf_prog_attach((prog), (target), (type), (flags)); \ + if (__ret < 0) \ + FAIL_ERRNO("prog_attach(" #type ")"); \ + __ret; \ + }) + +#define xbpf_prog_detach2(prog, target, type) \ + ({ \ + int __ret = bpf_prog_detach2((prog), (target), (type)); \ + if (__ret < 0) \ + FAIL_ERRNO("prog_detach2(" #type ")"); \ + __ret; \ + }) + +#define xpthread_create(thread, attr, func, arg) \ + ({ \ + int __ret = pthread_create((thread), (attr), (func), (arg)); \ + errno = __ret; \ + if (__ret) \ + FAIL_ERRNO("pthread_create"); \ + __ret; \ + }) + +#define xpthread_join(thread, retval) \ + ({ \ + int __ret = pthread_join((thread), (retval)); \ + errno = __ret; \ + if (__ret) \ + FAIL_ERRNO("pthread_join"); \ + __ret; \ + }) + +static inline int poll_read(int fd, unsigned int timeout_sec) +{ + struct timeval timeout = { .tv_sec = timeout_sec }; + fd_set 
rfds; + int r; + + FD_ZERO(&rfds); + FD_SET(fd, &rfds); + + r = select(fd + 1, &rfds, NULL, NULL, &timeout); + if (r == 0) + errno = ETIME; + + return r == 1 ? 0 : -1; +} + +static inline int accept_timeout(int fd, struct sockaddr *addr, socklen_t *len, + unsigned int timeout_sec) +{ + if (poll_read(fd, timeout_sec)) + return -1; + + return accept(fd, addr, len); +} + +static inline int recv_timeout(int fd, void *buf, size_t len, int flags, + unsigned int timeout_sec) +{ + if (poll_read(fd, timeout_sec)) + return -1; + + return recv(fd, buf, len, flags); +} + +static inline void init_addr_loopback4(struct sockaddr_storage *ss, + socklen_t *len) +{ + struct sockaddr_in *addr4 = memset(ss, 0, sizeof(*ss)); + + addr4->sin_family = AF_INET; + addr4->sin_port = 0; + addr4->sin_addr.s_addr = htonl(INADDR_LOOPBACK); + *len = sizeof(*addr4); +} + +static inline void init_addr_loopback6(struct sockaddr_storage *ss, + socklen_t *len) +{ + struct sockaddr_in6 *addr6 = memset(ss, 0, sizeof(*ss)); + + addr6->sin6_family = AF_INET6; + addr6->sin6_port = 0; + addr6->sin6_addr = in6addr_loopback; + *len = sizeof(*addr6); +} + +static inline void init_addr_loopback_vsock(struct sockaddr_storage *ss, + socklen_t *len) +{ + struct sockaddr_vm *addr = memset(ss, 0, sizeof(*ss)); + + addr->svm_family = AF_VSOCK; + addr->svm_port = VMADDR_PORT_ANY; + addr->svm_cid = VMADDR_CID_LOCAL; + *len = sizeof(*addr); +} + +static inline void init_addr_loopback(int family, struct sockaddr_storage *ss, + socklen_t *len) +{ + switch (family) { + case AF_INET: + init_addr_loopback4(ss, len); + return; + case AF_INET6: + init_addr_loopback6(ss, len); + return; + case AF_VSOCK: + init_addr_loopback_vsock(ss, len); + return; + default: + FAIL("unsupported address family %d", family); + } +} + +static inline struct sockaddr *sockaddr(struct sockaddr_storage *ss) +{ + return (struct sockaddr *)ss; +} + +#endif // __SOCKMAP_HELPERS__ diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c index 141c1e5944ee..7dc8dd713256 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c @@ -20,11 +20,6 @@ #include #include -/* workaround for older vm_sockets.h */ -#ifndef VMADDR_CID_LOCAL -#define VMADDR_CID_LOCAL 1 -#endif - #include #include @@ -32,263 +27,7 @@ #include "test_progs.h" #include "test_sockmap_listen.skel.h" -#define IO_TIMEOUT_SEC 30 -#define MAX_STRERR_LEN 256 -#define MAX_TEST_NAME 80 - -#define __always_unused __attribute__((__unused__)) - -#define _FAIL(errnum, fmt...) \ - ({ \ - error_at_line(0, (errnum), __func__, __LINE__, fmt); \ - CHECK_FAIL(true); \ - }) -#define FAIL(fmt...) _FAIL(0, fmt) -#define FAIL_ERRNO(fmt...) _FAIL(errno, fmt) -#define FAIL_LIBBPF(err, msg) \ - ({ \ - char __buf[MAX_STRERR_LEN]; \ - libbpf_strerror((err), __buf, sizeof(__buf)); \ - FAIL("%s: %s", (msg), __buf); \ - }) - -/* Wrappers that fail the test on error and report it. 
*/ - -#define xaccept_nonblock(fd, addr, len) \ - ({ \ - int __ret = \ - accept_timeout((fd), (addr), (len), IO_TIMEOUT_SEC); \ - if (__ret == -1) \ - FAIL_ERRNO("accept"); \ - __ret; \ - }) - -#define xbind(fd, addr, len) \ - ({ \ - int __ret = bind((fd), (addr), (len)); \ - if (__ret == -1) \ - FAIL_ERRNO("bind"); \ - __ret; \ - }) - -#define xclose(fd) \ - ({ \ - int __ret = close((fd)); \ - if (__ret == -1) \ - FAIL_ERRNO("close"); \ - __ret; \ - }) - -#define xconnect(fd, addr, len) \ - ({ \ - int __ret = connect((fd), (addr), (len)); \ - if (__ret == -1) \ - FAIL_ERRNO("connect"); \ - __ret; \ - }) - -#define xgetsockname(fd, addr, len) \ - ({ \ - int __ret = getsockname((fd), (addr), (len)); \ - if (__ret == -1) \ - FAIL_ERRNO("getsockname"); \ - __ret; \ - }) - -#define xgetsockopt(fd, level, name, val, len) \ - ({ \ - int __ret = getsockopt((fd), (level), (name), (val), (len)); \ - if (__ret == -1) \ - FAIL_ERRNO("getsockopt(" #name ")"); \ - __ret; \ - }) - -#define xlisten(fd, backlog) \ - ({ \ - int __ret = listen((fd), (backlog)); \ - if (__ret == -1) \ - FAIL_ERRNO("listen"); \ - __ret; \ - }) - -#define xsetsockopt(fd, level, name, val, len) \ - ({ \ - int __ret = setsockopt((fd), (level), (name), (val), (len)); \ - if (__ret == -1) \ - FAIL_ERRNO("setsockopt(" #name ")"); \ - __ret; \ - }) - -#define xsend(fd, buf, len, flags) \ - ({ \ - ssize_t __ret = send((fd), (buf), (len), (flags)); \ - if (__ret == -1) \ - FAIL_ERRNO("send"); \ - __ret; \ - }) - -#define xrecv_nonblock(fd, buf, len, flags) \ - ({ \ - ssize_t __ret = recv_timeout((fd), (buf), (len), (flags), \ - IO_TIMEOUT_SEC); \ - if (__ret == -1) \ - FAIL_ERRNO("recv"); \ - __ret; \ - }) - -#define xsocket(family, sotype, flags) \ - ({ \ - int __ret = socket(family, sotype, flags); \ - if (__ret == -1) \ - FAIL_ERRNO("socket"); \ - __ret; \ - }) - -#define xbpf_map_delete_elem(fd, key) \ - ({ \ - int __ret = bpf_map_delete_elem((fd), (key)); \ - if (__ret < 0) \ - FAIL_ERRNO("map_delete"); \ - __ret; \ - }) - -#define xbpf_map_lookup_elem(fd, key, val) \ - ({ \ - int __ret = bpf_map_lookup_elem((fd), (key), (val)); \ - if (__ret < 0) \ - FAIL_ERRNO("map_lookup"); \ - __ret; \ - }) - -#define xbpf_map_update_elem(fd, key, val, flags) \ - ({ \ - int __ret = bpf_map_update_elem((fd), (key), (val), (flags)); \ - if (__ret < 0) \ - FAIL_ERRNO("map_update"); \ - __ret; \ - }) - -#define xbpf_prog_attach(prog, target, type, flags) \ - ({ \ - int __ret = \ - bpf_prog_attach((prog), (target), (type), (flags)); \ - if (__ret < 0) \ - FAIL_ERRNO("prog_attach(" #type ")"); \ - __ret; \ - }) - -#define xbpf_prog_detach2(prog, target, type) \ - ({ \ - int __ret = bpf_prog_detach2((prog), (target), (type)); \ - if (__ret < 0) \ - FAIL_ERRNO("prog_detach2(" #type ")"); \ - __ret; \ - }) - -#define xpthread_create(thread, attr, func, arg) \ - ({ \ - int __ret = pthread_create((thread), (attr), (func), (arg)); \ - errno = __ret; \ - if (__ret) \ - FAIL_ERRNO("pthread_create"); \ - __ret; \ - }) - -#define xpthread_join(thread, retval) \ - ({ \ - int __ret = pthread_join((thread), (retval)); \ - errno = __ret; \ - if (__ret) \ - FAIL_ERRNO("pthread_join"); \ - __ret; \ - }) - -static int poll_read(int fd, unsigned int timeout_sec) -{ - struct timeval timeout = { .tv_sec = timeout_sec }; - fd_set rfds; - int r; - - FD_ZERO(&rfds); - FD_SET(fd, &rfds); - - r = select(fd + 1, &rfds, NULL, NULL, &timeout); - if (r == 0) - errno = ETIME; - - return r == 1 ? 
0 : -1; -} - -static int accept_timeout(int fd, struct sockaddr *addr, socklen_t *len, - unsigned int timeout_sec) -{ - if (poll_read(fd, timeout_sec)) - return -1; - - return accept(fd, addr, len); -} - -static int recv_timeout(int fd, void *buf, size_t len, int flags, - unsigned int timeout_sec) -{ - if (poll_read(fd, timeout_sec)) - return -1; - - return recv(fd, buf, len, flags); -} - -static void init_addr_loopback4(struct sockaddr_storage *ss, socklen_t *len) -{ - struct sockaddr_in *addr4 = memset(ss, 0, sizeof(*ss)); - - addr4->sin_family = AF_INET; - addr4->sin_port = 0; - addr4->sin_addr.s_addr = htonl(INADDR_LOOPBACK); - *len = sizeof(*addr4); -} - -static void init_addr_loopback6(struct sockaddr_storage *ss, socklen_t *len) -{ - struct sockaddr_in6 *addr6 = memset(ss, 0, sizeof(*ss)); - - addr6->sin6_family = AF_INET6; - addr6->sin6_port = 0; - addr6->sin6_addr = in6addr_loopback; - *len = sizeof(*addr6); -} - -static void init_addr_loopback_vsock(struct sockaddr_storage *ss, socklen_t *len) -{ - struct sockaddr_vm *addr = memset(ss, 0, sizeof(*ss)); - - addr->svm_family = AF_VSOCK; - addr->svm_port = VMADDR_PORT_ANY; - addr->svm_cid = VMADDR_CID_LOCAL; - *len = sizeof(*addr); -} - -static void init_addr_loopback(int family, struct sockaddr_storage *ss, - socklen_t *len) -{ - switch (family) { - case AF_INET: - init_addr_loopback4(ss, len); - return; - case AF_INET6: - init_addr_loopback6(ss, len); - return; - case AF_VSOCK: - init_addr_loopback_vsock(ss, len); - return; - default: - FAIL("unsupported address family %d", family); - } -} - -static inline struct sockaddr *sockaddr(struct sockaddr_storage *ss) -{ - return (struct sockaddr *)ss; -} +#include "sockmap_helpers.h" static int enable_reuseport(int s, int progfd) { From 298970c8af9d73a9f3df4ac871a4456f87fd4cab Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Mon, 22 May 2023 19:56:14 -0700 Subject: [PATCH 58/79] bpf, sockmap: Build helper to create connected socket pair A common operation for testing is to spin up a pair of sockets that are connected. Then we can use these to run specific tests that need to send data, check BPF programs and so on. The sockmap_listen programs already have this logic, so let's move it into the new sockmap_helpers header file for general use.
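For reference, a minimal usage sketch of the moved helpers (illustrative only, not part of the patch; error handling trimmed):

	int s, c0, p0;

	s = socket_loopback(AF_INET, SOCK_STREAM);
	if (s < 0)
		return;
	/* c0 is the connected client, p0 the accepted peer */
	if (!create_pair(s, AF_INET, SOCK_STREAM, &c0, &p0)) {
		xclose(c0);
		xclose(p0);
	}
	xclose(s);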
Signed-off-by: John Fastabend Signed-off-by: Daniel Borkmann Reviewed-by: Jakub Sitnicki Link: https://lore.kernel.org/bpf/20230523025618.113937-11-john.fastabend@gmail.com --- .../bpf/prog_tests/sockmap_helpers.h | 118 ++++++++++++++++++ .../selftests/bpf/prog_tests/sockmap_listen.c | 107 +--------------- 2 files changed, 123 insertions(+), 102 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h b/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h index 5aa99e6adcb4..d12665490a90 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h @@ -269,4 +269,122 @@ static inline struct sockaddr *sockaddr(struct sockaddr_storage *ss) return (struct sockaddr *)ss; } +static inline int add_to_sockmap(int sock_mapfd, int fd1, int fd2) +{ + u64 value; + u32 key; + int err; + + key = 0; + value = fd1; + err = xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST); + if (err) + return err; + + key = 1; + value = fd2; + return xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST); +} + +static inline int create_pair(int s, int family, int sotype, int *c, int *p) +{ + struct sockaddr_storage addr; + socklen_t len; + int err = 0; + + len = sizeof(addr); + err = xgetsockname(s, sockaddr(&addr), &len); + if (err) + return err; + + *c = xsocket(family, sotype, 0); + if (*c < 0) + return errno; + err = xconnect(*c, sockaddr(&addr), len); + if (err) { + err = errno; + goto close_cli0; + } + + *p = xaccept_nonblock(s, NULL, NULL); + if (*p < 0) { + err = errno; + goto close_cli0; + } + return err; +close_cli0: + close(*c); + return err; +} + +static inline int create_socket_pairs(int s, int family, int sotype, + int *c0, int *c1, int *p0, int *p1) +{ + int err; + + err = create_pair(s, family, sotype, c0, p0); + if (err) + return err; + + err = create_pair(s, family, sotype, c1, p1); + if (err) { + close(*c0); + close(*p0); + } + return err; +} + +static inline int enable_reuseport(int s, int progfd) +{ + int err, one = 1; + + err = xsetsockopt(s, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)); + if (err) + return -1; + err = xsetsockopt(s, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF, &progfd, + sizeof(progfd)); + if (err) + return -1; + + return 0; +} + +static inline int socket_loopback_reuseport(int family, int sotype, int progfd) +{ + struct sockaddr_storage addr; + socklen_t len; + int err, s; + + init_addr_loopback(family, &addr, &len); + + s = xsocket(family, sotype, 0); + if (s == -1) + return -1; + + if (progfd >= 0) + enable_reuseport(s, progfd); + + err = xbind(s, sockaddr(&addr), len); + if (err) + goto close; + + if (sotype & SOCK_DGRAM) + return s; + + err = xlisten(s, SOMAXCONN); + if (err) + goto close; + + return s; +close: + xclose(s); + return -1; +} + +static inline int socket_loopback(int family, int sotype) +{ + return socket_loopback_reuseport(family, sotype, -1); +} + + #endif // __SOCKMAP_HELPERS__ diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c index 7dc8dd713256..b4f6f3a50ae5 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c @@ -29,58 +29,6 @@ #include "sockmap_helpers.h" -static int enable_reuseport(int s, int progfd) -{ - int err, one = 1; - - err = xsetsockopt(s, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)); - if (err) - return -1; - err = xsetsockopt(s, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF, &progfd, - 
sizeof(progfd)); - if (err) - return -1; - - return 0; -} - -static int socket_loopback_reuseport(int family, int sotype, int progfd) -{ - struct sockaddr_storage addr; - socklen_t len; - int err, s; - - init_addr_loopback(family, &addr, &len); - - s = xsocket(family, sotype, 0); - if (s == -1) - return -1; - - if (progfd >= 0) - enable_reuseport(s, progfd); - - err = xbind(s, sockaddr(&addr), len); - if (err) - goto close; - - if (sotype & SOCK_DGRAM) - return s; - - err = xlisten(s, SOMAXCONN); - if (err) - goto close; - - return s; -close: - xclose(s); - return -1; -} - -static int socket_loopback(int family, int sotype) -{ - return socket_loopback_reuseport(family, sotype, -1); -} - static void test_insert_invalid(struct test_sockmap_listen *skel __always_unused, int family, int sotype, int mapfd) { @@ -723,31 +671,12 @@ static const char *redir_mode_str(enum redir_mode mode) } } -static int add_to_sockmap(int sock_mapfd, int fd1, int fd2) -{ - u64 value; - u32 key; - int err; - - key = 0; - value = fd1; - err = xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST); - if (err) - return err; - - key = 1; - value = fd2; - return xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST); -} - static void redir_to_connected(int family, int sotype, int sock_mapfd, int verd_mapfd, enum redir_mode mode) { const char *log_prefix = redir_mode_str(mode); - struct sockaddr_storage addr; int s, c0, c1, p0, p1; unsigned int pass; - socklen_t len; int err, n; u32 key; char b; @@ -758,36 +687,13 @@ static void redir_to_connected(int family, int sotype, int sock_mapfd, if (s < 0) return; - len = sizeof(addr); - err = xgetsockname(s, sockaddr(&addr), &len); + err = create_socket_pairs(s, family, sotype, &c0, &c1, &p0, &p1); if (err) goto close_srv; - c0 = xsocket(family, sotype, 0); - if (c0 < 0) - goto close_srv; - err = xconnect(c0, sockaddr(&addr), len); - if (err) - goto close_cli0; - - p0 = xaccept_nonblock(s, NULL, NULL); - if (p0 < 0) - goto close_cli0; - - c1 = xsocket(family, sotype, 0); - if (c1 < 0) - goto close_peer0; - err = xconnect(c1, sockaddr(&addr), len); - if (err) - goto close_cli1; - - p1 = xaccept_nonblock(s, NULL, NULL); - if (p1 < 0) - goto close_cli1; - err = add_to_sockmap(sock_mapfd, p0, p1); if (err) - goto close_peer1; + goto close; n = write(mode == REDIR_INGRESS ? c1 : p1, "a", 1); if (n < 0) @@ -795,12 +701,12 @@ static void redir_to_connected(int family, int sotype, int sock_mapfd, if (n == 0) FAIL("%s: incomplete write", log_prefix); if (n < 1) - goto close_peer1; + goto close; key = SK_PASS; err = xbpf_map_lookup_elem(verd_mapfd, &key, &pass); if (err) - goto close_peer1; + goto close; if (pass != 1) FAIL("%s: want pass count 1, have %d", log_prefix, pass); n = recv_timeout(c0, &b, 1, 0, IO_TIMEOUT_SEC); @@ -809,13 +715,10 @@ static void redir_to_connected(int family, int sotype, int sock_mapfd, if (n == 0) FAIL("%s: incomplete recv", log_prefix); -close_peer1: +close: xclose(p1); -close_cli1: xclose(c1); -close_peer0: xclose(p0); -close_cli0: xclose(c0); close_srv: xclose(s); From 1fa1fe8ff161cc34620e31295235dc1e6d0dce77 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Mon, 22 May 2023 19:56:15 -0700 Subject: [PATCH 59/79] bpf, sockmap: Test shutdown() correctly exits epoll and recv()=0 When a session gracefully shuts down, epoll needs to wake up and any recv() readers should return 0, not the -EAGAIN they previously returned. Note we use epoll instead of select to test the epoll wake on shutdown event as well.
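In sketch form, the user-visible contract the new test checks (c1/p1 are the connected pair created in the test below; epoll setup elided):

	shutdown(p1, SHUT_WR);      /* peer sends FIN */
	/* epoll_wait() must now report EPOLLIN on c1, and ... */
	n = recv(c1, &b, 1, 0);     /* ... recv() must return 0, not -EAGAIN */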
Signed-off-by: John Fastabend Signed-off-by: Daniel Borkmann Reviewed-by: Jakub Sitnicki Link: https://lore.kernel.org/bpf/20230523025618.113937-12-john.fastabend@gmail.com --- .../selftests/bpf/prog_tests/sockmap_basic.c | 62 +++++++++++++++++++ .../bpf/progs/test_sockmap_pass_prog.c | 32 ++++++++++ 2 files changed, 94 insertions(+) create mode 100644 tools/testing/selftests/bpf/progs/test_sockmap_pass_prog.c diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c index 0ce25a967481..615a8164c8f0 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c @@ -2,6 +2,7 @@ // Copyright (c) 2020 Cloudflare #include #include +#include #include "test_progs.h" #include "test_skmsg_load_helpers.skel.h" @@ -9,8 +10,11 @@ #include "test_sockmap_invalid_update.skel.h" #include "test_sockmap_skb_verdict_attach.skel.h" #include "test_sockmap_progs_query.skel.h" +#include "test_sockmap_pass_prog.skel.h" #include "bpf_iter_sockmap.skel.h" +#include "sockmap_helpers.h" + #define TCP_REPAIR 19 /* TCP sock is under repair right now */ #define TCP_REPAIR_ON 1 @@ -350,6 +354,62 @@ out: test_sockmap_progs_query__destroy(skel); } +#define MAX_EVENTS 10 +static void test_sockmap_skb_verdict_shutdown(void) +{ + struct epoll_event ev, events[MAX_EVENTS]; + int n, err, map, verdict, s, c1, p1; + struct test_sockmap_pass_prog *skel; + int epollfd; + int zero = 0; + char b; + + skel = test_sockmap_pass_prog__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open_and_load")) + return; + + verdict = bpf_program__fd(skel->progs.prog_skb_verdict); + map = bpf_map__fd(skel->maps.sock_map_rx); + + err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0); + if (!ASSERT_OK(err, "bpf_prog_attach")) + goto out; + + s = socket_loopback(AF_INET, SOCK_STREAM); + if (s < 0) + goto out; + err = create_pair(s, AF_INET, SOCK_STREAM, &c1, &p1); + if (err < 0) + goto out; + + err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST); + if (err < 0) + goto out_close; + + shutdown(p1, SHUT_WR); + + ev.events = EPOLLIN; + ev.data.fd = c1; + + epollfd = epoll_create1(0); + if (!ASSERT_GT(epollfd, -1, "epoll_create(0)")) + goto out_close; + err = epoll_ctl(epollfd, EPOLL_CTL_ADD, c1, &ev); + if (!ASSERT_OK(err, "epoll_ctl(EPOLL_CTL_ADD)")) + goto out_close; + err = epoll_wait(epollfd, events, MAX_EVENTS, -1); + if (!ASSERT_EQ(err, 1, "epoll_wait(fd)")) + goto out_close; + + n = recv(c1, &b, 1, SOCK_NONBLOCK); + ASSERT_EQ(n, 0, "recv_timeout(fin)"); +out_close: + close(c1); + close(p1); +out: + test_sockmap_pass_prog__destroy(skel); +} + void test_sockmap_basic(void) { if (test__start_subtest("sockmap create_update_free")) @@ -384,4 +444,6 @@ void test_sockmap_basic(void) test_sockmap_progs_query(BPF_SK_SKB_STREAM_VERDICT); if (test__start_subtest("sockmap skb_verdict progs query")) test_sockmap_progs_query(BPF_SK_SKB_VERDICT); + if (test__start_subtest("sockmap skb_verdict shutdown")) + test_sockmap_skb_verdict_shutdown(); } diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_pass_prog.c b/tools/testing/selftests/bpf/progs/test_sockmap_pass_prog.c new file mode 100644 index 000000000000..1d86a717a290 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_sockmap_pass_prog.c @@ -0,0 +1,32 @@ +#include +#include +#include + +struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __uint(max_entries, 20); + __type(key, int); + __type(value, int); +} sock_map_rx SEC(".maps"); + +struct { + 
__uint(type, BPF_MAP_TYPE_SOCKMAP); + __uint(max_entries, 20); + __type(key, int); + __type(value, int); +} sock_map_tx SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __uint(max_entries, 20); + __type(key, int); + __type(value, int); +} sock_map_msg SEC(".maps"); + +SEC("sk_skb") +int prog_skb_verdict(struct __sk_buff *skb) +{ + return SK_PASS; +} + +char _license[] SEC("license") = "GPL"; From bb516f98c731cafe27900f43c0ef0aa7b5a9a44b Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Mon, 22 May 2023 19:56:16 -0700 Subject: [PATCH 60/79] bpf, sockmap: Test FIONREAD returns correct bytes in rx buffer A bug was reported where ioctl(FIONREAD) returned zero even though the socket with a SK_SKB verdict program attached had bytes in the msg queue. The result is programs may hang or more likely try to recover, but use suboptimal buffer sizes. Add a test to check that ioctl(FIONREAD) returns the correct number of bytes. Signed-off-by: John Fastabend Signed-off-by: Daniel Borkmann Reviewed-by: Jakub Sitnicki Link: https://lore.kernel.org/bpf/20230523025618.113937-13-john.fastabend@gmail.com --- .../selftests/bpf/prog_tests/sockmap_basic.c | 48 +++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c index 615a8164c8f0..fe56049f6568 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c @@ -410,6 +410,52 @@ out: test_sockmap_pass_prog__destroy(skel); } +static void test_sockmap_skb_verdict_fionread(void) +{ + int err, map, verdict, s, c0, c1, p0, p1; + struct test_sockmap_pass_prog *skel; + int zero = 0, sent, recvd, avail; + char buf[256] = "0123456789"; + + skel = test_sockmap_pass_prog__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open_and_load")) + return; + + verdict = bpf_program__fd(skel->progs.prog_skb_verdict); + map = bpf_map__fd(skel->maps.sock_map_rx); + + err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0); + if (!ASSERT_OK(err, "bpf_prog_attach")) + goto out; + + s = socket_loopback(AF_INET, SOCK_STREAM); + if (!ASSERT_GT(s, -1, "socket_loopback(s)")) + goto out; + err = create_socket_pairs(s, AF_INET, SOCK_STREAM, &c0, &c1, &p0, &p1); + if (!ASSERT_OK(err, "create_socket_pairs(s)")) + goto out; + + err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST); + if (!ASSERT_OK(err, "bpf_map_update_elem(c1)")) + goto out_close; + + sent = xsend(p1, &buf, sizeof(buf), 0); + ASSERT_EQ(sent, sizeof(buf), "xsend(p0)"); + err = ioctl(c1, FIONREAD, &avail); + ASSERT_OK(err, "ioctl(FIONREAD) error"); + ASSERT_EQ(avail, sizeof(buf), "ioctl(FIONREAD)"); + recvd = recv_timeout(c1, &buf, sizeof(buf), SOCK_NONBLOCK, IO_TIMEOUT_SEC); + ASSERT_EQ(recvd, sizeof(buf), "recv_timeout(c0)"); + +out_close: + close(c0); + close(p0); + close(c1); + close(p1); +out: + test_sockmap_pass_prog__destroy(skel); +} + void test_sockmap_basic(void) { if (test__start_subtest("sockmap create_update_free")) @@ -446,4 +492,6 @@ void test_sockmap_basic(void) test_sockmap_progs_query(BPF_SK_SKB_VERDICT); if (test__start_subtest("sockmap skb_verdict shutdown")) test_sockmap_skb_verdict_shutdown(); + if (test__start_subtest("sockmap skb_verdict fionread")) + test_sockmap_skb_verdict_fionread(); } From 80e24d22267597b2ace54686735e3c4cb26a523f Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Mon, 22 May 2023 19:56:17 -0700 Subject: [PATCH 61/79] bpf, sockmap: Test FIONREAD returns correct bytes in rx 
buffer with drops When BPF program drops pkts the sockmap logic 'eats' the packet and updates copied_seq. In the PASS case where the sk_buff is accepted we update copied_seq from recvmsg path so we need a new test to handle the drop case. Original patch series broke this resulting in test_sockmap_skb_verdict_fionread:PASS:ioctl(FIONREAD) error 0 nsec test_sockmap_skb_verdict_fionread:FAIL:ioctl(FIONREAD) unexpected ioctl(FIONREAD): actual 1503041772 != expected 256 After updated patch with fix. Signed-off-by: John Fastabend Signed-off-by: Daniel Borkmann Reviewed-by: Jakub Sitnicki Link: https://lore.kernel.org/bpf/20230523025618.113937-14-john.fastabend@gmail.com --- .../selftests/bpf/prog_tests/sockmap_basic.c | 47 ++++++++++++++----- .../bpf/progs/test_sockmap_drop_prog.c | 32 +++++++++++++ 2 files changed, 66 insertions(+), 13 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/test_sockmap_drop_prog.c diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c index fe56049f6568..064cc5e8d9ad 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c @@ -11,6 +11,7 @@ #include "test_sockmap_skb_verdict_attach.skel.h" #include "test_sockmap_progs_query.skel.h" #include "test_sockmap_pass_prog.skel.h" +#include "test_sockmap_drop_prog.skel.h" #include "bpf_iter_sockmap.skel.h" #include "sockmap_helpers.h" @@ -410,19 +411,31 @@ out: test_sockmap_pass_prog__destroy(skel); } -static void test_sockmap_skb_verdict_fionread(void) +static void test_sockmap_skb_verdict_fionread(bool pass_prog) { + int expected, zero = 0, sent, recvd, avail; int err, map, verdict, s, c0, c1, p0, p1; - struct test_sockmap_pass_prog *skel; - int zero = 0, sent, recvd, avail; + struct test_sockmap_pass_prog *pass; + struct test_sockmap_drop_prog *drop; char buf[256] = "0123456789"; - skel = test_sockmap_pass_prog__open_and_load(); - if (!ASSERT_OK_PTR(skel, "open_and_load")) - return; + if (pass_prog) { + pass = test_sockmap_pass_prog__open_and_load(); + if (!ASSERT_OK_PTR(pass, "open_and_load")) + return; + verdict = bpf_program__fd(pass->progs.prog_skb_verdict); + map = bpf_map__fd(pass->maps.sock_map_rx); + expected = sizeof(buf); + } else { + drop = test_sockmap_drop_prog__open_and_load(); + if (!ASSERT_OK_PTR(drop, "open_and_load")) + return; + verdict = bpf_program__fd(drop->progs.prog_skb_verdict); + map = bpf_map__fd(drop->maps.sock_map_rx); + /* On drop data is consumed immediately and copied_seq inc'd */ + expected = 0; + } - verdict = bpf_program__fd(skel->progs.prog_skb_verdict); - map = bpf_map__fd(skel->maps.sock_map_rx); err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0); if (!ASSERT_OK(err, "bpf_prog_attach")) @@ -443,9 +456,12 @@ static void test_sockmap_skb_verdict_fionread(void) ASSERT_EQ(sent, sizeof(buf), "xsend(p0)"); err = ioctl(c1, FIONREAD, &avail); ASSERT_OK(err, "ioctl(FIONREAD) error"); - ASSERT_EQ(avail, sizeof(buf), "ioctl(FIONREAD)"); - recvd = recv_timeout(c1, &buf, sizeof(buf), SOCK_NONBLOCK, IO_TIMEOUT_SEC); - ASSERT_EQ(recvd, sizeof(buf), "recv_timeout(c0)"); + ASSERT_EQ(avail, expected, "ioctl(FIONREAD)"); + /* On DROP test there will be no data to read */ + if (pass_prog) { + recvd = recv_timeout(c1, &buf, sizeof(buf), SOCK_NONBLOCK, IO_TIMEOUT_SEC); + ASSERT_EQ(recvd, sizeof(buf), "recv_timeout(c0)"); + } out_close: close(c0); @@ -453,7 +469,10 @@ out_close: close(c1); close(p1); out: - 
test_sockmap_pass_prog__destroy(skel); + if (pass_prog) + test_sockmap_pass_prog__destroy(pass); + else + test_sockmap_drop_prog__destroy(drop); } void test_sockmap_basic(void) @@ -493,5 +512,7 @@ if (test__start_subtest("sockmap skb_verdict shutdown")) test_sockmap_skb_verdict_shutdown(); if (test__start_subtest("sockmap skb_verdict fionread")) - test_sockmap_skb_verdict_fionread(); + test_sockmap_skb_verdict_fionread(true); + if (test__start_subtest("sockmap skb_verdict fionread on drop")) + test_sockmap_skb_verdict_fionread(false); } diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_drop_prog.c b/tools/testing/selftests/bpf/progs/test_sockmap_drop_prog.c new file mode 100644 index 000000000000..29314805ce42 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_sockmap_drop_prog.c @@ -0,0 +1,32 @@ +#include +#include +#include + +struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __uint(max_entries, 20); + __type(key, int); + __type(value, int); +} sock_map_rx SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __uint(max_entries, 20); + __type(key, int); + __type(value, int); +} sock_map_tx SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __uint(max_entries, 20); + __type(key, int); + __type(value, int); +} sock_map_msg SEC(".maps"); + +SEC("sk_skb") +int prog_skb_verdict(struct __sk_buff *skb) +{ + return SK_DROP; +} + +char _license[] SEC("license") = "GPL"; From f726e03564ef4e754dd93beb54303e2e1671049e Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Mon, 22 May 2023 19:56:18 -0700 Subject: [PATCH 62/79] bpf, sockmap: Test progs verifier error with latest clang With a relatively recent clang (7090c10273119), and with the commit that fixed warnings in selftests (c8ed668593972) by using __sink(err) to resolve unused variables, we get the following verifier error.
root@6e731a24b33a:/host/tools/testing/selftests/bpf# ./test_sockmap libbpf: prog 'bpf_sockmap': BPF program load failed: Permission denied libbpf: prog 'bpf_sockmap': -- BEGIN PROG LOAD LOG -- 0: R1=ctx(off=0,imm=0) R10=fp0 ; op = (int) skops->op; 0: (61) r2 = *(u32 *)(r1 +0) ; R1=ctx(off=0,imm=0) R2_w=scalar(umax=4294967295,var_off=(0x0; 0xffffffff)) ; switch (op) { 1: (16) if w2 == 0x4 goto pc+5 ; R2_w=scalar(umax=4294967295,var_off=(0x0; 0xffffffff)) 2: (56) if w2 != 0x5 goto pc+15 ; R2_w=5 ; lport = skops->local_port; 3: (61) r2 = *(u32 *)(r1 +68) ; R1=ctx(off=0,imm=0) R2_w=scalar(umax=4294967295,var_off=(0x0; 0xffffffff)) ; if (lport == 10000) { 4: (56) if w2 != 0x2710 goto pc+13 18: R1=ctx(off=0,imm=0) R2=scalar(umax=4294967295,var_off=(0x0; 0xffffffff)) R10=fp0 ; __sink(err); 18: (bc) w1 = w0 R0 !read_ok processed 18 insns (limit 1000000) max_states_per_insn 0 total_states 2 peak_states 2 mark_read 1 -- END PROG LOAD LOG -- libbpf: prog 'bpf_sockmap': failed to load: -13 libbpf: failed to load object 'test_sockmap_kern.bpf.o' load_bpf_file: (-1) No such file or directory ERROR: (-1) load bpf failed libbpf: prog 'bpf_sockmap': BPF program load failed: Permission denied libbpf: prog 'bpf_sockmap': -- BEGIN PROG LOAD LOG -- 0: R1=ctx(off=0,imm=0) R10=fp0 ; op = (int) skops->op; 0: (61) r2 = *(u32 *)(r1 +0) ; R1=ctx(off=0,imm=0) R2_w=scalar(umax=4294967295,var_off=(0x0; 0xffffffff)) ; switch (op) { 1: (16) if w2 == 0x4 goto pc+5 ; R2_w=scalar(umax=4294967295,var_off=(0x0; 0xffffffff)) 2: (56) if w2 != 0x5 goto pc+15 ; R2_w=5 ; lport = skops->local_port; 3: (61) r2 = *(u32 *)(r1 +68) ; R1=ctx(off=0,imm=0) R2_w=scalar(umax=4294967295,var_off=(0x0; 0xffffffff)) ; if (lport == 10000) { 4: (56) if w2 != 0x2710 goto pc+13 18: R1=ctx(off=0,imm=0) R2=scalar(umax=4294967295,var_off=(0x0; 0xffffffff)) R10=fp0 ; __sink(err); 18: (bc) w1 = w0 R0 !read_ok processed 18 insns (limit 1000000) max_states_per_insn 0 total_states 2 peak_states 2 mark_read 1 -- END PROG LOAD LOG -- libbpf: prog 'bpf_sockmap': failed to load: -13 libbpf: failed to load object 'test_sockhash_kern.bpf.o' load_bpf_file: (-1) No such file or directory ERROR: (-1) load bpf failed libbpf: prog 'bpf_sockmap': BPF program load failed: Permission denied libbpf: prog 'bpf_sockmap': -- BEGIN PROG LOAD LOG -- 0: R1=ctx(off=0,imm=0) R10=fp0 ; op = (int) skops->op; 0: (61) r2 = *(u32 *)(r1 +0) ; R1=ctx(off=0,imm=0) R2_w=scalar(umax=4294967295,var_off=(0x0; 0xffffffff)) ; switch (op) { 1: (16) if w2 == 0x4 goto pc+5 ; R2_w=scalar(umax=4294967295,var_off=(0x0; 0xffffffff)) 2: (56) if w2 != 0x5 goto pc+15 ; R2_w=5 ; lport = skops->local_port; 3: (61) r2 = *(u32 *)(r1 +68) ; R1=ctx(off=0,imm=0) R2_w=scalar(umax=4294967295,var_off=(0x0; 0xffffffff)) ; if (lport == 10000) { 4: (56) if w2 != 0x2710 goto pc+13 18: R1=ctx(off=0,imm=0) R2=scalar(umax=4294967295,var_off=(0x0; 0xffffffff)) R10=fp0 ; __sink(err); 18: (bc) w1 = w0 R0 !read_ok processed 18 insns (limit 1000000) max_states_per_insn 0 total_states 2 peak_states 2 mark_read 1 -- END PROG LOAD LOG -- To fix simply remove the err value because its not actually used anywhere in the testing. We can investigate the root cause later. Future patch should probably actually test the err value as well. Although if the map updates fail they will get caught eventually by userspace. 
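For context, a simplified sketch of the pattern the verifier rejects (hand-reduced from the program above; the err-to-w0 mapping is inferred from the log):

	int err, ret = 1;

	if (lport == 10000)
		err = bpf_sock_map_update(skops, &sock_map, &ret, BPF_NOEXIST);
	/* On paths where the branch is not taken, err (tracked in w0
	 * per the log) is never written, so reading it below trips
	 * "R0 !read_ok".
	 */
	__sink(err);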
Fixes: c8ed668593972 ("selftests/bpf: fix lots of silly mistakes pointed out by compiler") Signed-off-by: John Fastabend Signed-off-by: Daniel Borkmann Reviewed-by: Jakub Sitnicki Link: https://lore.kernel.org/bpf/20230523025618.113937-15-john.fastabend@gmail.com --- .../testing/selftests/bpf/progs/test_sockmap_kern.h | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h index baf9ebc6d903..99d2ea9fb658 100644 --- a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h +++ b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h @@ -191,7 +191,7 @@ SEC("sockops") int bpf_sockmap(struct bpf_sock_ops *skops) { __u32 lport, rport; - int op, err, ret; + int op, ret; op = (int) skops->op; @@ -203,10 +203,10 @@ int bpf_sockmap(struct bpf_sock_ops *skops) if (lport == 10000) { ret = 1; #ifdef SOCKMAP - err = bpf_sock_map_update(skops, &sock_map, &ret, + bpf_sock_map_update(skops, &sock_map, &ret, BPF_NOEXIST); #else - err = bpf_sock_hash_update(skops, &sock_map, &ret, + bpf_sock_hash_update(skops, &sock_map, &ret, BPF_NOEXIST); #endif } @@ -218,10 +218,10 @@ int bpf_sockmap(struct bpf_sock_ops *skops) if (bpf_ntohl(rport) == 10001) { ret = 10; #ifdef SOCKMAP - err = bpf_sock_map_update(skops, &sock_map, &ret, + bpf_sock_map_update(skops, &sock_map, &ret, BPF_NOEXIST); #else - err = bpf_sock_hash_update(skops, &sock_map, &ret, + bpf_sock_hash_update(skops, &sock_map, &ret, BPF_NOEXIST); #endif } @@ -230,8 +230,6 @@ int bpf_sockmap(struct bpf_sock_ops *skops) break; } - __sink(err); - return 0; } From 368d3cb406cdd074d1df2ad9ec06d1bfcb664882 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Mon, 22 May 2023 11:17:14 +0800 Subject: [PATCH 63/79] page_pool: fix inconsistency for page_pool_ring_[un]lock() page_pool_ring_[un]lock() use in_softirq() to decide which spin lock variant to use, and when they are called in a context where in_softirq() is false, spin_lock_bh() is called in page_pool_ring_lock() while spin_unlock() is called in page_pool_ring_unlock(), because spin_lock_bh() has disabled the softirq in page_pool_ring_lock(), which causes inconsistency for the spin lock pair calling. This patch fixes it by returning the in_softirq state from page_pool_producer_lock(), and using it to decide which spin lock variant to use in page_pool_producer_unlock(). As pool->ring has both a producer and a consumer lock, rename them to page_pool_producer_[un]lock() to reflect the actual usage. Also move them to page_pool.c as they are only used there, and remove the 'inline' as the compiler may have a better idea of whether to do inlining or not.
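In sketch form, the broken pairing before this patch, for a caller where in_softirq() is initially false:

	page_pool_ring_lock(pool);   /* in_softirq() false: takes
				      * spin_lock_bh(), disabling BH
				      */
	/* ... produce pages into pool->ring ... */
	page_pool_ring_unlock(pool); /* in_softirq() now true: takes plain
				      * spin_unlock(), BH never re-enabled
				      */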
Fixes: 7886244736a4 ("net: page_pool: Add bulk support for ptr_ring") Signed-off-by: Yunsheng Lin Acked-by: Jesper Dangaard Brouer Acked-by: Ilias Apalodimas Link: https://lore.kernel.org/r/20230522031714.5089-1-linyunsheng@huawei.com Signed-off-by: Jakub Kicinski --- include/net/page_pool.h | 18 ------------------ net/core/page_pool.c | 28 ++++++++++++++++++++++++++-- 2 files changed, 26 insertions(+), 20 deletions(-) diff --git a/include/net/page_pool.h b/include/net/page_pool.h index c8ec2f34722b..126f9e294389 100644 --- a/include/net/page_pool.h +++ b/include/net/page_pool.h @@ -399,22 +399,4 @@ static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid) page_pool_update_nid(pool, new_nid); } -static inline void page_pool_ring_lock(struct page_pool *pool) - __acquires(&pool->ring.producer_lock) -{ - if (in_softirq()) - spin_lock(&pool->ring.producer_lock); - else - spin_lock_bh(&pool->ring.producer_lock); -} - -static inline void page_pool_ring_unlock(struct page_pool *pool) - __releases(&pool->ring.producer_lock) -{ - if (in_softirq()) - spin_unlock(&pool->ring.producer_lock); - else - spin_unlock_bh(&pool->ring.producer_lock); -} - #endif /* _NET_PAGE_POOL_H */ diff --git a/net/core/page_pool.c b/net/core/page_pool.c index e212e9d7edcb..a3e12a61d456 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -134,6 +134,29 @@ EXPORT_SYMBOL(page_pool_ethtool_stats_get); #define recycle_stat_add(pool, __stat, val) #endif +static bool page_pool_producer_lock(struct page_pool *pool) + __acquires(&pool->ring.producer_lock) +{ + bool in_softirq = in_softirq(); + + if (in_softirq) + spin_lock(&pool->ring.producer_lock); + else + spin_lock_bh(&pool->ring.producer_lock); + + return in_softirq; +} + +static void page_pool_producer_unlock(struct page_pool *pool, + bool in_softirq) + __releases(&pool->ring.producer_lock) +{ + if (in_softirq) + spin_unlock(&pool->ring.producer_lock); + else + spin_unlock_bh(&pool->ring.producer_lock); +} + static int page_pool_init(struct page_pool *pool, const struct page_pool_params *params) { @@ -617,6 +640,7 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data, int count) { int i, bulk_len = 0; + bool in_softirq; for (i = 0; i < count; i++) { struct page *page = virt_to_head_page(data[i]); @@ -635,7 +659,7 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data, return; /* Bulk producer into ptr_ring page_pool cache */ - page_pool_ring_lock(pool); + in_softirq = page_pool_producer_lock(pool); for (i = 0; i < bulk_len; i++) { if (__ptr_ring_produce(&pool->ring, data[i])) { /* ring full */ @@ -644,7 +668,7 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data, } } recycle_stat_add(pool, ring, i); - page_pool_ring_unlock(pool); + page_pool_producer_unlock(pool, in_softirq); /* Hopefully all pages was return into ptr_ring */ if (likely(i == bulk_len)) From d6c36cbc5e533f48bd89a7b5f339bd82b8b4378a Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 22 May 2023 15:41:21 +0200 Subject: [PATCH 64/79] r8169: Use a raw_spinlock_t for the register locks. The driver's interrupt service routine is requested with the IRQF_NO_THREAD if MSI is available. This means that the routine is invoked in hardirq context even on PREEMPT_RT. The routine itself is relatively short and schedules a worker, performs register access and schedules NAPI. 
On PREEMPT_RT, scheduling NAPI from hardirq results in waking ksoftirqd for further processing so using NAPI threads with this driver is highly recommended since it NULL routes the threaded-IRQ efforts. Adding rtl_hw_aspm_clkreq_enable() to the ISR is problematic on PREEMPT_RT because the function uses spinlock_t locks which become sleeping locks on PREEMPT_RT. The locks are only used to protect register access and don't nest into other functions or locks. They are also not used for unbounded period of time. Therefore it looks okay to convert them to raw_spinlock_t. Convert the three locks which are used from the interrupt service routine to raw_spinlock_t. Fixes: e1ed3e4d9111 ("r8169: disable ASPM during NAPI poll") Signed-off-by: Sebastian Andrzej Siewior Reviewed-by: Heiner Kallweit Link: https://lore.kernel.org/r/20230522134121.uxjax0F5@linutronix.de Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/realtek/r8169_main.c | 44 +++++++++++------------ 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index a7e376e7e689..4b19803a7dd0 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -616,10 +616,10 @@ struct rtl8169_private { struct work_struct work; } wk; - spinlock_t config25_lock; - spinlock_t mac_ocp_lock; + raw_spinlock_t config25_lock; + raw_spinlock_t mac_ocp_lock; - spinlock_t cfg9346_usage_lock; + raw_spinlock_t cfg9346_usage_lock; int cfg9346_usage_count; unsigned supports_gmii:1; @@ -671,20 +671,20 @@ static void rtl_lock_config_regs(struct rtl8169_private *tp) { unsigned long flags; - spin_lock_irqsave(&tp->cfg9346_usage_lock, flags); + raw_spin_lock_irqsave(&tp->cfg9346_usage_lock, flags); if (!--tp->cfg9346_usage_count) RTL_W8(tp, Cfg9346, Cfg9346_Lock); - spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags); + raw_spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags); } static void rtl_unlock_config_regs(struct rtl8169_private *tp) { unsigned long flags; - spin_lock_irqsave(&tp->cfg9346_usage_lock, flags); + raw_spin_lock_irqsave(&tp->cfg9346_usage_lock, flags); if (!tp->cfg9346_usage_count++) RTL_W8(tp, Cfg9346, Cfg9346_Unlock); - spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags); + raw_spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags); } static void rtl_pci_commit(struct rtl8169_private *tp) @@ -698,10 +698,10 @@ static void rtl_mod_config2(struct rtl8169_private *tp, u8 clear, u8 set) unsigned long flags; u8 val; - spin_lock_irqsave(&tp->config25_lock, flags); + raw_spin_lock_irqsave(&tp->config25_lock, flags); val = RTL_R8(tp, Config2); RTL_W8(tp, Config2, (val & ~clear) | set); - spin_unlock_irqrestore(&tp->config25_lock, flags); + raw_spin_unlock_irqrestore(&tp->config25_lock, flags); } static void rtl_mod_config5(struct rtl8169_private *tp, u8 clear, u8 set) @@ -709,10 +709,10 @@ static void rtl_mod_config5(struct rtl8169_private *tp, u8 clear, u8 set) unsigned long flags; u8 val; - spin_lock_irqsave(&tp->config25_lock, flags); + raw_spin_lock_irqsave(&tp->config25_lock, flags); val = RTL_R8(tp, Config5); RTL_W8(tp, Config5, (val & ~clear) | set); - spin_unlock_irqrestore(&tp->config25_lock, flags); + raw_spin_unlock_irqrestore(&tp->config25_lock, flags); } static bool rtl_is_8125(struct rtl8169_private *tp) @@ -899,9 +899,9 @@ static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) { unsigned long flags; - spin_lock_irqsave(&tp->mac_ocp_lock, flags); + 
raw_spin_lock_irqsave(&tp->mac_ocp_lock, flags); __r8168_mac_ocp_write(tp, reg, data); - spin_unlock_irqrestore(&tp->mac_ocp_lock, flags); + raw_spin_unlock_irqrestore(&tp->mac_ocp_lock, flags); } static u16 __r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg) @@ -919,9 +919,9 @@ static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg) unsigned long flags; u16 val; - spin_lock_irqsave(&tp->mac_ocp_lock, flags); + raw_spin_lock_irqsave(&tp->mac_ocp_lock, flags); val = __r8168_mac_ocp_read(tp, reg); - spin_unlock_irqrestore(&tp->mac_ocp_lock, flags); + raw_spin_unlock_irqrestore(&tp->mac_ocp_lock, flags); return val; } @@ -932,10 +932,10 @@ static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask, unsigned long flags; u16 data; - spin_lock_irqsave(&tp->mac_ocp_lock, flags); + raw_spin_lock_irqsave(&tp->mac_ocp_lock, flags); data = __r8168_mac_ocp_read(tp, reg); __r8168_mac_ocp_write(tp, reg, (data & ~mask) | set); - spin_unlock_irqrestore(&tp->mac_ocp_lock, flags); + raw_spin_unlock_irqrestore(&tp->mac_ocp_lock, flags); } /* Work around a hw issue with RTL8168g PHY, the quirk disables @@ -1420,14 +1420,14 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) r8168_mac_ocp_modify(tp, 0xc0b6, BIT(0), 0); } - spin_lock_irqsave(&tp->config25_lock, flags); + raw_spin_lock_irqsave(&tp->config25_lock, flags); for (i = 0; i < tmp; i++) { options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask; if (wolopts & cfg[i].opt) options |= cfg[i].mask; RTL_W8(tp, cfg[i].reg, options); } - spin_unlock_irqrestore(&tp->config25_lock, flags); + raw_spin_unlock_irqrestore(&tp->config25_lock, flags); switch (tp->mac_version) { case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: @@ -5179,9 +5179,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->eee_adv = -1; tp->ocp_base = OCP_STD_PHY_BASE; - spin_lock_init(&tp->cfg9346_usage_lock); - spin_lock_init(&tp->config25_lock); - spin_lock_init(&tp->mac_ocp_lock); + raw_spin_lock_init(&tp->cfg9346_usage_lock); + raw_spin_lock_init(&tp->config25_lock); + raw_spin_lock_init(&tp->mac_ocp_lock); dev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev, struct pcpu_sw_netstats); From 8a02fb71d7192ff1a9a47c9d937624966c6e09af Mon Sep 17 00:00:00 2001 From: Pratyush Yadav Date: Mon, 22 May 2023 17:30:20 +0200 Subject: [PATCH 65/79] net: fix skb leak in __skb_tstamp_tx() Commit 50749f2dd685 ("tcp/udp: Fix memleaks of sk and zerocopy skbs with TX timestamp.") added a call to skb_orphan_frags_rx() to fix leaks with zerocopy skbs. But it ended up adding a leak of its own. When skb_orphan_frags_rx() fails, the function just returns, leaking the skb it just cloned. Free it before returning. This bug was discovered and resolved using Coverity Static Analysis Security Testing (SAST) by Synopsys, Inc. 
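The bug class is easy to state: once the clone succeeds, every exit path must either hand ownership onward or free the clone. A standalone sketch of the pattern in plain C, with pkt_clone() and orphan_frags() as hypothetical stand-ins for skb_clone() and skb_orphan_frags_rx():

    #include <stdlib.h>

    struct pkt { int zerocopy; };

    static struct pkt *pkt_clone(void)
    {
        return calloc(1, sizeof(struct pkt));
    }

    static int orphan_frags(const struct pkt *p)
    {
        return p->zerocopy;             /* non-zero means failure */
    }

    static void tstamp_tx(void)
    {
        struct pkt *clone = pkt_clone();

        if (!clone)
            return;
        if (orphan_frags(clone)) {
            free(clone);                /* the fix: drop the clone here */
            return;                     /* returning without it was the leak */
        }
        free(clone);                    /* stands in for the delivery path */
    }

    int main(void)
    {
        tstamp_tx();
        return 0;
    }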
Fixes: 50749f2dd685 ("tcp/udp: Fix memleaks of sk and zerocopy skbs with TX timestamp.") Signed-off-by: Pratyush Yadav Reviewed-by: Kuniyuki Iwashima Reviewed-by: Willem de Bruijn Link: https://lore.kernel.org/r/20230522153020.32422-1-ptyadav@amazon.de Signed-off-by: Jakub Kicinski --- net/core/skbuff.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 515ec5cdc79c..cea28d30abb5 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -5224,8 +5224,10 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb, } else { skb = skb_clone(orig_skb, GFP_ATOMIC); - if (skb_orphan_frags_rx(skb, GFP_ATOMIC)) + if (skb_orphan_frags_rx(skb, GFP_ATOMIC)) { + kfree_skb(skb); return; + } } if (!skb) return; From 7e7b3b097a9d95cd76fb462f83dc740b71bd73c0 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 22 May 2023 16:09:03 -0700 Subject: [PATCH 66/79] docs: netdev: document the existence of the mail bot We had a good run, but after 4 weeks of use we heard someone asking about pw-bot commands. Let's explain its existence in the docs. It's not a complete documentation but hopefully it's enough for the casual contributor. The project and scope are in flux so the details would likely become out of date, if we were to document more in depth. Link: https://lore.kernel.org/all/20230522140057.GB18381@nucnuc.mle/ Reviewed-by: Andrew Lunn Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230522230903.1853151-1-kuba@kernel.org Signed-off-by: Jakub Kicinski --- Documentation/process/maintainer-netdev.rst | 33 ++++++++++++++++----- 1 file changed, 26 insertions(+), 7 deletions(-) diff --git a/Documentation/process/maintainer-netdev.rst b/Documentation/process/maintainer-netdev.rst index f73ac9e175a8..83614cec9328 100644 --- a/Documentation/process/maintainer-netdev.rst +++ b/Documentation/process/maintainer-netdev.rst @@ -127,13 +127,32 @@ the value of ``Message-ID`` to the URL above. Updating patch status ~~~~~~~~~~~~~~~~~~~~~ -It may be tempting to help the maintainers and update the state of your -own patches when you post a new version or spot a bug. Please **do not** -do that. -Interfering with the patch status on patchwork will only cause confusion. Leave -it to the maintainer to figure out what is the most recent and current -version that should be applied. If there is any doubt, the maintainer -will reply and ask what should be done. +Contributors and reviewers do not have the permissions to update patch +state directly in patchwork. Patchwork doesn't expose much information +about the history of the state of patches, therefore having multiple +people update the state leads to confusion. + +Instead of delegating patchwork permissions netdev uses a simple mail +bot which looks for special commands/lines within the emails sent to +the mailing list. For example to mark a series as Changes Requested +one needs to send the following line anywhere in the email thread:: + + pw-bot: changes-requested + +As a result the bot will set the entire series to Changes Requested. +This may be useful when author discovers a bug in their own series +and wants to prevent it from getting applied. + +The use of the bot is entirely optional, if in doubt ignore its existence +completely. Maintainers will classify and update the state of the patches +themselves. No email should ever be sent to the list with the main purpose +of communicating with the bot, the bot commands should be seen as metadata. 
+ +The use of the bot is restricted to authors of the patches (the ``From:`` +header on patch submission and command must match!), maintainers themselves +and a handful of senior reviewers. Bot records its activity here: + + https://patchwork.hopto.org/pw-bot.html Review timelines ~~~~~~~~~~~~~~~~ From 04910d8cbfed65dad21c31723c6c1a8d9f990fb6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ar=C4=B1n=C3=A7=20=C3=9CNAL?= Date: Mon, 22 May 2023 13:57:43 +0300 Subject: [PATCH 67/79] net: ethernet: mtk_eth_soc: fix QoS on DSA MAC on non MTK_NETSYS_V2 SoCs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The commit c6d96df9fa2c ("net: ethernet: mtk_eth_soc: drop generic vlan rx offload, only use DSA untagging") makes VLAN RX offloading to be only used on the SoCs without the MTK_NETSYS_V2 ability (which are not just MT7621 and MT7622). The commit disables the proper handling of special tagged (DSA) frames, added with commit 87e3df4961f4 ("net-next: ethernet: mediatek: add CDM able to recognize the tag for DSA"), for non MTK_NETSYS_V2 SoCs when it finds a MAC that does not use DSA. So if the other MAC uses DSA, the CDMQ component transmits DSA tagged frames to the CPU improperly. This issue can be observed on frames with TCP, for example, a TCP speed test using iperf3 won't work. The commit disables the proper handling of special tagged (DSA) frames because it assumes that these SoCs don't use more than one MAC, which is wrong. Although I made Frank address this false assumption on the patch log when they sent the patch on behalf of Felix, the code still made changes with this assumption. Therefore, the proper handling of special tagged (DSA) frames must be kept enabled in all circumstances as it doesn't affect non DSA tagged frames. Hardware DSA untagging, introduced with the commit 2d7605a72906 ("net: ethernet: mtk_eth_soc: enable hardware DSA untagging"), and VLAN RX offloading are operations on the two CDM components of the frame engine, CDMP and CDMQ, which connect to Packet DMA (PDMA) and QoS DMA (QDMA) and are between the MACs and the CPU. These operations apply to all MACs of the SoC so if one MAC uses DSA and the other doesn't, the hardware DSA untagging operation will cause the CDMP component to transmit non DSA tagged frames to the CPU improperly. Since the VLAN RX offloading feature configuration was dropped, VLAN RX offloading can only be used along with hardware DSA untagging. So, for the case above, we need to disable both features and leave it to the CPU, therefore software, to untag the DSA and VLAN tags. So the correct way to handle this is: For all SoCs: Enable the proper handling of special tagged (DSA) frames (MTK_CDMQ_IG_CTRL). For non MTK_NETSYS_V2 SoCs: Enable hardware DSA untagging (MTK_CDMP_IG_CTRL). Enable VLAN RX offloading (MTK_CDMP_EG_CTRL). When a non MTK_NETSYS_V2 SoC MAC does not use DSA: Disable hardware DSA untagging (MTK_CDMP_IG_CTRL). Disable VLAN RX offloading (MTK_CDMP_EG_CTRL). Fixes: c6d96df9fa2c ("net: ethernet: mtk_eth_soc: drop generic vlan rx offload, only use DSA untagging") Signed-off-by: Arınç ÜNAL Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index a75fd072082c..834c644b67db 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -3269,18 +3269,14 @@ static int mtk_open(struct net_device *dev) eth->dsa_meta[i] = md_dst; } } else { - /* Hardware special tag parsing needs to be disabled if at least - * one MAC does not use DSA. + /* Hardware DSA untagging and VLAN RX offloading need to be + * disabled if at least one MAC does not use DSA. */ u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL); val &= ~MTK_CDMP_STAG_EN; mtk_w32(eth, val, MTK_CDMP_IG_CTRL); - val = mtk_r32(eth, MTK_CDMQ_IG_CTRL); - val &= ~MTK_CDMQ_STAG_EN; - mtk_w32(eth, val, MTK_CDMQ_IG_CTRL); - mtk_w32(eth, 0, MTK_CDMP_EG_CTRL); } From 878ecb0897f4737a4c9401f3523fd49589025671 Mon Sep 17 00:00:00 2001 From: Gavrilov Ilia Date: Tue, 23 May 2023 08:29:44 +0000 Subject: [PATCH 68/79] ipv6: Fix out-of-bounds access in ipv6_find_tlv() optlen is fetched without checking whether there is more than one byte to parse. It can lead to out-of-bounds access. Found by InfoTeCS on behalf of Linux Verification Center (linuxtesting.org) with SVACE. Fixes: c61a40432509 ("[IPV6]: Find option offset by type.") Signed-off-by: Gavrilov Ilia Reviewed-by: Jiri Pirko Reviewed-by: David Ahern Signed-off-by: David S. Miller --- net/ipv6/exthdrs_core.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c index da46c4284676..49e31e4ae7b7 100644 --- a/net/ipv6/exthdrs_core.c +++ b/net/ipv6/exthdrs_core.c @@ -143,6 +143,8 @@ int ipv6_find_tlv(const struct sk_buff *skb, int offset, int type) optlen = 1; break; default: + if (len < 2) + goto bad; optlen = nh[offset + 1] + 2; if (optlen > len) goto bad; From a095326e2c0f33743ce8e887d5b90edf3f36cced Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 11 May 2023 11:47:09 -0400 Subject: [PATCH 69/79] net/handshake: Remove unneeded check from handshake_dup() handshake_req_submit() now verifies that the socket has a file. Fixes: 3b3009ea8abb ("net/handshake: Create a NETLINK service for handling handshake requests") Reviewed-by: Simon Horman Signed-off-by: Chuck Lever Signed-off-by: Jakub Kicinski --- net/handshake/netlink.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/net/handshake/netlink.c b/net/handshake/netlink.c index 35c9c445e0b8..7ec8a76c3c8a 100644 --- a/net/handshake/netlink.c +++ b/net/handshake/netlink.c @@ -99,9 +99,6 @@ static int handshake_dup(struct socket *sock) struct file *file; int newfd; - if (!sock->file) - return -EBADF; - file = get_file(sock->file); newfd = get_unused_fd_flags(O_CLOEXEC); if (newfd < 0) { From 7ea9c1ec66bc099b0bfba961a8a46dfe25d7d8e4 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 11 May 2023 11:47:40 -0400 Subject: [PATCH 70/79] net/handshake: Fix handshake_dup() ref counting If get_unused_fd_flags() fails, we ended up calling fput(sock->file) twice. 
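The underlying rule: a reference is released exactly once, by its owner. handshake_dup() already drops the reference it took when get_unused_fd_flags() fails, so the caller must not drop it again. A minimal sketch of that discipline with hypothetical names (dup_fd() stands in for handshake_dup()):

    #include <stdio.h>

    struct fileref { int count; };

    static void get_ref(struct fileref *f) { f->count++; }
    static void put_ref(struct fileref *f) { f->count--; }

    static int dup_fd(struct fileref *f, int fail)
    {
        get_ref(f);
        if (fail) {
            put_ref(f);                 /* callee drops its own reference */
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct fileref f = { 1 };

        if (dup_fd(&f, 1) < 0) {
            /* A second put_ref(&f) here would be the double put. */
        }
        printf("count=%d\n", f.count);  /* prints count=1 */
        return 0;
    }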
Reported-by: Dan Carpenter Suggested-by: Paolo Abeni Fixes: 3b3009ea8abb ("net/handshake: Create a NETLINK service for handling handshake requests") Signed-off-by: Chuck Lever Signed-off-by: Jakub Kicinski --- net/handshake/netlink.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/net/handshake/netlink.c b/net/handshake/netlink.c index 7ec8a76c3c8a..f5dc170689d9 100644 --- a/net/handshake/netlink.c +++ b/net/handshake/netlink.c @@ -139,15 +139,16 @@ int handshake_nl_accept_doit(struct sk_buff *skb, struct genl_info *info) goto out_complete; } err = req->hr_proto->hp_accept(req, info, fd); - if (err) + if (err) { + fput(sock->file); goto out_complete; + } trace_handshake_cmd_accept(net, req, req->hr_sk, fd); return 0; out_complete: handshake_complete(req, -EIO, NULL); - fput(sock->file); out_status: trace_handshake_cmd_accept_err(net, req, NULL, err); return err; From 7afc6d0a107ffbd448c96eb2458b9e64a5af7860 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 11 May 2023 11:48:13 -0400 Subject: [PATCH 71/79] net/handshake: Fix uninitialized local variable trace_handshake_cmd_done_err() simply records the pointer in @req, so initializing it to NULL is sufficient and safe. Reported-by: Dan Carpenter Fixes: 3b3009ea8abb ("net/handshake: Create a NETLINK service for handling handshake requests") Reviewed-by: Simon Horman Signed-off-by: Chuck Lever Signed-off-by: Jakub Kicinski --- net/handshake/netlink.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/handshake/netlink.c b/net/handshake/netlink.c index f5dc170689d9..16a4bde648ba 100644 --- a/net/handshake/netlink.c +++ b/net/handshake/netlink.c @@ -157,8 +157,8 @@ out_status: int handshake_nl_done_doit(struct sk_buff *skb, struct genl_info *info) { struct net *net = sock_net(skb->sk); + struct handshake_req *req = NULL; struct socket *sock = NULL; - struct handshake_req *req; int fd, status, err; if (GENL_REQ_ATTR_CHECK(info, HANDSHAKE_A_DONE_SOCKFD)) From fc490880e39d86c65ab2bcbd357af1950fa55e48 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 11 May 2023 11:48:45 -0400 Subject: [PATCH 72/79] net/handshake: handshake_genl_notify() shouldn't ignore @flags Reported-by: Dan Carpenter Fixes: 3b3009ea8abb ("net/handshake: Create a NETLINK service for handling handshake requests") Reviewed-by: Simon Horman Signed-off-by: Chuck Lever Signed-off-by: Jakub Kicinski --- net/handshake/netlink.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/handshake/netlink.c b/net/handshake/netlink.c index 16a4bde648ba..1086653e1fad 100644 --- a/net/handshake/netlink.c +++ b/net/handshake/netlink.c @@ -48,7 +48,7 @@ int handshake_genl_notify(struct net *net, const struct handshake_proto *proto, proto->hp_handler_class)) return -ESRCH; - msg = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); + msg = genlmsg_new(GENLMSG_DEFAULT_SIZE, flags); if (!msg) return -ENOMEM; From 1ce77c998f0415d7d9d91cb9bd7665e25c8f75f1 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 11 May 2023 11:49:17 -0400 Subject: [PATCH 73/79] net/handshake: Unpin sock->file if a handshake is cancelled If user space never calls DONE, sock->file's reference count remains elevated. Enable sock->file to be freed eventually in this case. 
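The invariant, reduced to a standalone sketch with hypothetical names: the file reference pinned at submit time must be dropped on every terminal path, and cancellation is a terminal path too:

    #include <stdbool.h>
    #include <stdio.h>

    struct req { bool file_pinned; };

    static void req_submit(struct req *r) { r->file_pinned = true; }
    static void req_done(struct req *r)   { r->file_pinned = false; }

    static void req_cancel(struct req *r)
    {
        r->file_pinned = false;         /* the fix: unpin on cancel as well */
    }

    int main(void)
    {
        struct req a, b;

        req_submit(&a);
        req_done(&a);                   /* normal completion unpins */

        req_submit(&b);
        req_cancel(&b);                 /* user space never sent DONE */

        printf("a=%d b=%d\n", a.file_pinned, b.file_pinned); /* a=0 b=0 */
        return 0;
    }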
Reported-by: Jakub Kicinski Fixes: 3b3009ea8abb ("net/handshake: Create a NETLINK service for handling handshake requests") Signed-off-by: Chuck Lever Signed-off-by: Jakub Kicinski --- net/handshake/handshake.h | 1 + net/handshake/request.c | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/net/handshake/handshake.h b/net/handshake/handshake.h index 4dac965c99df..8aeaadca844f 100644 --- a/net/handshake/handshake.h +++ b/net/handshake/handshake.h @@ -31,6 +31,7 @@ struct handshake_req { struct list_head hr_list; struct rhash_head hr_rhash; unsigned long hr_flags; + struct file *hr_file; const struct handshake_proto *hr_proto; struct sock *hr_sk; void (*hr_odestruct)(struct sock *sk); diff --git a/net/handshake/request.c b/net/handshake/request.c index 94d5cef3e048..d78d41abb3d9 100644 --- a/net/handshake/request.c +++ b/net/handshake/request.c @@ -239,6 +239,7 @@ int handshake_req_submit(struct socket *sock, struct handshake_req *req, } req->hr_odestruct = req->hr_sk->sk_destruct; req->hr_sk->sk_destruct = handshake_sk_destruct; + req->hr_file = sock->file; ret = -EOPNOTSUPP; net = sock_net(req->hr_sk); @@ -334,6 +335,9 @@ bool handshake_req_cancel(struct sock *sk) return false; } + /* Request accepted and waiting for DONE */ + fput(req->hr_file); + out_true: trace_handshake_cancel(net, req, sk); From 26fb5480a27d34975cc2b680b77af189620dd740 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 11 May 2023 11:49:50 -0400 Subject: [PATCH 74/79] net/handshake: Enable the SNI extension to work properly Enable the upper layer protocol to specify the SNI peername. This avoids the need for tlshd to use a DNS lookup, which can return a hostname that doesn't match the incoming certificate's SubjectName. Fixes: 2fd5532044a8 ("net/handshake: Add a kernel API for requesting a TLSv1.3 handshake") Reviewed-by: Simon Horman Signed-off-by: Chuck Lever Signed-off-by: Jakub Kicinski --- Documentation/netlink/specs/handshake.yaml | 4 ++++ Documentation/networking/tls-handshake.rst | 5 +++++ include/net/handshake.h | 1 + include/uapi/linux/handshake.h | 1 + net/handshake/tlshd.c | 8 ++++++++ 5 files changed, 19 insertions(+) diff --git a/Documentation/netlink/specs/handshake.yaml b/Documentation/netlink/specs/handshake.yaml index 614f1a585511..6d89e30f5fd5 100644 --- a/Documentation/netlink/specs/handshake.yaml +++ b/Documentation/netlink/specs/handshake.yaml @@ -68,6 +68,9 @@ attribute-sets: type: nest nested-attributes: x509 multi-attr: true + - + name: peername + type: string - name: done attributes: @@ -105,6 +108,7 @@ operations: - auth-mode - peer-identity - certificate + - peername - name: done doc: Handler reports handshake completion diff --git a/Documentation/networking/tls-handshake.rst b/Documentation/networking/tls-handshake.rst index a2817a88e905..6f5ea1646a47 100644 --- a/Documentation/networking/tls-handshake.rst +++ b/Documentation/networking/tls-handshake.rst @@ -53,6 +53,7 @@ fills in a structure that contains the parameters of the request: struct socket *ta_sock; tls_done_func_t ta_done; void *ta_data; + const char *ta_peername; unsigned int ta_timeout_ms; key_serial_t ta_keyring; key_serial_t ta_my_cert; @@ -71,6 +72,10 @@ instantiated a struct file in sock->file. has completed. Further explanation of this function is in the "Handshake Completion" sesction below. +The consumer can provide a NUL-terminated hostname in the @ta_peername +field that is sent as part of ClientHello. If no peername is provided, +the DNS hostname associated with the server's IP address is used instead.
+ The consumer can fill in the @ta_timeout_ms field to force the servicing handshake agent to exit after a number of milliseconds. This enables the socket to be fully closed once both the kernel and the handshake agent diff --git a/include/net/handshake.h b/include/net/handshake.h index 3352b1ab43b3..2e26e436e85f 100644 --- a/include/net/handshake.h +++ b/include/net/handshake.h @@ -24,6 +24,7 @@ struct tls_handshake_args { struct socket *ta_sock; tls_done_func_t ta_done; void *ta_data; + const char *ta_peername; unsigned int ta_timeout_ms; key_serial_t ta_keyring; key_serial_t ta_my_cert; diff --git a/include/uapi/linux/handshake.h b/include/uapi/linux/handshake.h index 1de4d0b95325..3d7ea58778c9 100644 --- a/include/uapi/linux/handshake.h +++ b/include/uapi/linux/handshake.h @@ -44,6 +44,7 @@ enum { HANDSHAKE_A_ACCEPT_AUTH_MODE, HANDSHAKE_A_ACCEPT_PEER_IDENTITY, HANDSHAKE_A_ACCEPT_CERTIFICATE, + HANDSHAKE_A_ACCEPT_PEERNAME, __HANDSHAKE_A_ACCEPT_MAX, HANDSHAKE_A_ACCEPT_MAX = (__HANDSHAKE_A_ACCEPT_MAX - 1) diff --git a/net/handshake/tlshd.c b/net/handshake/tlshd.c index fcbeb63b4eb1..b735f5cced2f 100644 --- a/net/handshake/tlshd.c +++ b/net/handshake/tlshd.c @@ -31,6 +31,7 @@ struct tls_handshake_req { int th_type; unsigned int th_timeout_ms; int th_auth_mode; + const char *th_peername; key_serial_t th_keyring; key_serial_t th_certificate; key_serial_t th_privkey; @@ -48,6 +49,7 @@ tls_handshake_req_init(struct handshake_req *req, treq->th_timeout_ms = args->ta_timeout_ms; treq->th_consumer_done = args->ta_done; treq->th_consumer_data = args->ta_data; + treq->th_peername = args->ta_peername; treq->th_keyring = args->ta_keyring; treq->th_num_peerids = 0; treq->th_certificate = TLS_NO_CERT; @@ -214,6 +216,12 @@ static int tls_handshake_accept(struct handshake_req *req, ret = nla_put_u32(msg, HANDSHAKE_A_ACCEPT_MESSAGE_TYPE, treq->th_type); if (ret < 0) goto out_cancel; + if (treq->th_peername) { + ret = nla_put_string(msg, HANDSHAKE_A_ACCEPT_PEERNAME, + treq->th_peername); + if (ret < 0) + goto out_cancel; + } if (treq->th_timeout_ms) { ret = nla_put_u32(msg, HANDSHAKE_A_ACCEPT_TIMEOUT, treq->th_timeout_ms); if (ret < 0) From 57fb54ab9f6945e204740b696bd4cee61ee04e5e Mon Sep 17 00:00:00 2001 From: David Epping Date: Tue, 23 May 2023 17:31:05 +0200 Subject: [PATCH 75/79] net: phy: mscc: add VSC8502 to MODULE_DEVICE_TABLE The mscc driver implements support for VSC8502, so its ID should be in the MODULE_DEVICE_TABLE for automatic loading. Signed-off-by: David Epping Fixes: d3169863310d ("net: phy: mscc: add support for VSC8502") Reviewed-by: Vladimir Oltean Signed-off-by: Jakub Kicinski --- drivers/net/phy/mscc/mscc_main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c index 62bf99e45af1..bd81a4b041e5 100644 --- a/drivers/net/phy/mscc/mscc_main.c +++ b/drivers/net/phy/mscc/mscc_main.c @@ -2656,6 +2656,7 @@ static struct phy_driver vsc85xx_driver[] = { module_phy_driver(vsc85xx_driver); static struct mdio_device_id __maybe_unused vsc85xx_tbl[] = { + { PHY_ID_VSC8502, 0xfffffff0, }, { PHY_ID_VSC8504, 0xfffffff0, }, { PHY_ID_VSC8514, 0xfffffff0, }, { PHY_ID_VSC8530, 0xfffffff0, }, From fb055ce4a9e3a115f5dc42011a97cf0cfc7820e4 Mon Sep 17 00:00:00 2001 From: David Epping Date: Tue, 23 May 2023 17:31:06 +0200 Subject: [PATCH 76/79] net: phy: mscc: add support for VSC8501 The VSC8501 PHY can use the same driver implementation as the VSC8502. Adding the PHY ID and copying the handler functions of VSC8502 is sufficient to operate it. 
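The 0xfffffff0 mask is what lets one table entry cover every silicon revision: the header notes that the lowest ID nibble is always 0 in these constants, and the MDIO bus core compares IDs masked on both sides. A standalone sketch of that matching rule (the (id & mask) comparison semantics are the assumption here):

    #include <stdio.h>
    #include <stdint.h>

    static int phy_id_matches(uint32_t phy_id, uint32_t table_id, uint32_t mask)
    {
        return (phy_id & mask) == (table_id & mask);
    }

    int main(void)
    {
        uint32_t vsc8501 = 0x00070530;  /* PHY_ID_VSC8501 */

        /* any revision nibble of the VSC8501 matches */
        printf("%d\n", phy_id_matches(0x00070531, vsc8501, 0xfffffff0)); /* 1 */
        /* the VSC8502 does not */
        printf("%d\n", phy_id_matches(0x00070630, vsc8501, 0xfffffff0)); /* 0 */
        return 0;
    }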
Signed-off-by: David Epping Reviewed-by: Vladimir Oltean Signed-off-by: Jakub Kicinski --- drivers/net/phy/mscc/mscc.h | 1 + drivers/net/phy/mscc/mscc_main.c | 25 +++++++++++++++++++++++++ 2 files changed, 26 insertions(+) diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h index a50235fdf7d9..79cbb2418664 100644 --- a/drivers/net/phy/mscc/mscc.h +++ b/drivers/net/phy/mscc/mscc.h @@ -276,6 +276,7 @@ enum rgmii_clock_delay { /* Microsemi PHY ID's * Code assumes lowest nibble is 0 */ +#define PHY_ID_VSC8501 0x00070530 #define PHY_ID_VSC8502 0x00070630 #define PHY_ID_VSC8504 0x000704c0 #define PHY_ID_VSC8514 0x00070670 diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c index bd81a4b041e5..29fc27a16805 100644 --- a/drivers/net/phy/mscc/mscc_main.c +++ b/drivers/net/phy/mscc/mscc_main.c @@ -2316,6 +2316,30 @@ static int vsc85xx_probe(struct phy_device *phydev) /* Microsemi VSC85xx PHYs */ static struct phy_driver vsc85xx_driver[] = { +{ + .phy_id = PHY_ID_VSC8501, + .name = "Microsemi GE VSC8501 SyncE", + .phy_id_mask = 0xfffffff0, + /* PHY_BASIC_FEATURES */ + .soft_reset = &genphy_soft_reset, + .config_init = &vsc85xx_config_init, + .config_aneg = &vsc85xx_config_aneg, + .read_status = &vsc85xx_read_status, + .handle_interrupt = vsc85xx_handle_interrupt, + .config_intr = &vsc85xx_config_intr, + .suspend = &genphy_suspend, + .resume = &genphy_resume, + .probe = &vsc85xx_probe, + .set_wol = &vsc85xx_wol_set, + .get_wol = &vsc85xx_wol_get, + .get_tunable = &vsc85xx_get_tunable, + .set_tunable = &vsc85xx_set_tunable, + .read_page = &vsc85xx_phy_read_page, + .write_page = &vsc85xx_phy_write_page, + .get_sset_count = &vsc85xx_get_sset_count, + .get_strings = &vsc85xx_get_strings, + .get_stats = &vsc85xx_get_stats, +}, { .phy_id = PHY_ID_VSC8502, .name = "Microsemi GE VSC8502 SyncE", @@ -2656,6 +2680,7 @@ static struct phy_driver vsc85xx_driver[] = { module_phy_driver(vsc85xx_driver); static struct mdio_device_id __maybe_unused vsc85xx_tbl[] = { + { PHY_ID_VSC8501, 0xfffffff0, }, { PHY_ID_VSC8502, 0xfffffff0, }, { PHY_ID_VSC8504, 0xfffffff0, }, { PHY_ID_VSC8514, 0xfffffff0, }, From 7df0b33d7993338a06e4039ec025bb67851ee41d Mon Sep 17 00:00:00 2001 From: David Epping Date: Tue, 23 May 2023 17:31:07 +0200 Subject: [PATCH 77/79] net: phy: mscc: remove unnecessary phydev locking Holding the struct phy_device (phydev) lock is unnecessary when accessing phydev->interface in the PHY driver .config_init method, which is the only place that vsc85xx_rgmii_set_skews() is called from. The phy_modify_paged() function implements required MDIO bus level locking, which can not be achieved by a phydev lock. 
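For context, phy_modify_paged() serializes the whole page-select, read-modify-write, page-restore sequence on the MDIO bus lock. Its shape is roughly the following paraphrase (a sketch, not the verbatim implementation):

    static int modify_paged_sketch(struct phy_device *phydev, int page,
                                   u32 regnum, u16 mask, u16 set)
    {
        int ret = 0, oldpage;

        /* phy_select_page() takes the MDIO bus lock, then switches pages */
        oldpage = phy_select_page(phydev, page);
        if (oldpage >= 0)
            ret = __phy_modify(phydev, regnum, mask, set);

        /* phy_restore_page() restores the page and drops the bus lock */
        return phy_restore_page(phydev, oldpage, ret);
    }

Any concurrent MDIO access, from this driver or another, waits on that same bus lock, which is why the phydev lock adds nothing here.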
Signed-off-by: David Epping Reviewed-by: Russell King (Oracle) Signed-off-by: Jakub Kicinski --- drivers/net/phy/mscc/mscc_main.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c index 29fc27a16805..0c39b3ecb1f2 100644 --- a/drivers/net/phy/mscc/mscc_main.c +++ b/drivers/net/phy/mscc/mscc_main.c @@ -528,8 +528,6 @@ static int vsc85xx_rgmii_set_skews(struct phy_device *phydev, u32 rgmii_cntl, u16 reg_val = 0; int rc; - mutex_lock(&phydev->lock); - if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID || phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) reg_val |= RGMII_CLK_DELAY_2_0_NS << rgmii_rx_delay_pos; @@ -542,8 +540,6 @@ static int vsc85xx_rgmii_set_skews(struct phy_device *phydev, u32 rgmii_cntl, rgmii_rx_delay_mask | rgmii_tx_delay_mask, reg_val); - mutex_unlock(&phydev->lock); - return rc; } From 71460c9ec5c743e9ffffca3c874d66267c36345e Mon Sep 17 00:00:00 2001 From: David Epping Date: Tue, 23 May 2023 17:31:08 +0200 Subject: [PATCH 78/79] net: phy: mscc: enable VSC8501/2 RGMII RX clock By default the VSC8501 and VSC8502 RGMII/GMII/MII RX_CLK output is disabled. To allow packet forwarding towards the MAC it needs to be enabled. For other PHYs supported by this driver the clock output is enabled by default. Fixes: d3169863310d ("net: phy: mscc: add support for VSC8502") Signed-off-by: David Epping Reviewed-by: Russell King (Oracle) Reviewed-by: Vladimir Oltean Signed-off-by: Jakub Kicinski --- drivers/net/phy/mscc/mscc.h | 1 + drivers/net/phy/mscc/mscc_main.c | 54 +++++++++++++++++--------------- 2 files changed, 29 insertions(+), 26 deletions(-) diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h index 79cbb2418664..defe5cc6d4fc 100644 --- a/drivers/net/phy/mscc/mscc.h +++ b/drivers/net/phy/mscc/mscc.h @@ -179,6 +179,7 @@ enum rgmii_clock_delay { #define VSC8502_RGMII_CNTL 20 #define VSC8502_RGMII_RX_DELAY_MASK 0x0070 #define VSC8502_RGMII_TX_DELAY_MASK 0x0007 +#define VSC8502_RGMII_RX_CLK_DISABLE 0x0800 #define MSCC_PHY_WOL_LOWER_MAC_ADDR 21 #define MSCC_PHY_WOL_MID_MAC_ADDR 22 diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c index 0c39b3ecb1f2..28df8a2e4230 100644 --- a/drivers/net/phy/mscc/mscc_main.c +++ b/drivers/net/phy/mscc/mscc_main.c @@ -519,14 +519,27 @@ out_unlock: * * 2.0 ns (which causes the data to be sampled at exactly half way between * clock transitions at 1000 Mbps) if delays should be enabled */ -static int vsc85xx_rgmii_set_skews(struct phy_device *phydev, u32 rgmii_cntl, - u16 rgmii_rx_delay_mask, - u16 rgmii_tx_delay_mask) +static int vsc85xx_update_rgmii_cntl(struct phy_device *phydev, u32 rgmii_cntl, + u16 rgmii_rx_delay_mask, + u16 rgmii_tx_delay_mask) { u16 rgmii_rx_delay_pos = ffs(rgmii_rx_delay_mask) - 1; u16 rgmii_tx_delay_pos = ffs(rgmii_tx_delay_mask) - 1; u16 reg_val = 0; - int rc; + u16 mask = 0; + int rc = 0; + + /* For traffic to pass, the VSC8502 family needs the RX_CLK disable bit + * to be unset for all PHY modes, so do that as part of the paged + * register modification. + * For some family members (like VSC8530/31/40/41) this bit is reserved + * and read-only, and the RX clock is enabled by default. 
+ */ + if (rgmii_cntl == VSC8502_RGMII_CNTL) + mask |= VSC8502_RGMII_RX_CLK_DISABLE; + + if (phy_interface_is_rgmii(phydev)) + mask |= rgmii_rx_delay_mask | rgmii_tx_delay_mask; if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID || phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) @@ -535,29 +548,20 @@ static int vsc85xx_rgmii_set_skews(struct phy_device *phydev, u32 rgmii_cntl, phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) reg_val |= RGMII_CLK_DELAY_2_0_NS << rgmii_tx_delay_pos; - rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2, - rgmii_cntl, - rgmii_rx_delay_mask | rgmii_tx_delay_mask, - reg_val); + if (mask) + rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2, + rgmii_cntl, mask, reg_val); return rc; } static int vsc85xx_default_config(struct phy_device *phydev) { - int rc; - phydev->mdix_ctrl = ETH_TP_MDI_AUTO; - if (phy_interface_mode_is_rgmii(phydev->interface)) { - rc = vsc85xx_rgmii_set_skews(phydev, VSC8502_RGMII_CNTL, - VSC8502_RGMII_RX_DELAY_MASK, - VSC8502_RGMII_TX_DELAY_MASK); - if (rc) - return rc; - } - - return 0; + return vsc85xx_update_rgmii_cntl(phydev, VSC8502_RGMII_CNTL, + VSC8502_RGMII_RX_DELAY_MASK, + VSC8502_RGMII_TX_DELAY_MASK); } static int vsc85xx_get_tunable(struct phy_device *phydev, @@ -1754,13 +1758,11 @@ static int vsc8584_config_init(struct phy_device *phydev) if (ret) return ret; - if (phy_interface_is_rgmii(phydev)) { - ret = vsc85xx_rgmii_set_skews(phydev, VSC8572_RGMII_CNTL, - VSC8572_RGMII_RX_DELAY_MASK, - VSC8572_RGMII_TX_DELAY_MASK); - if (ret) - return ret; - } + ret = vsc85xx_update_rgmii_cntl(phydev, VSC8572_RGMII_CNTL, + VSC8572_RGMII_RX_DELAY_MASK, + VSC8572_RGMII_TX_DELAY_MASK); + if (ret) + return ret; ret = genphy_soft_reset(phydev); if (ret) From ad42a35bdfc6d3c0fc4cb4027d7b2757ce665665 Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Tue, 23 May 2023 09:33:05 -0700 Subject: [PATCH 79/79] udplite: Fix NULL pointer dereference in __sk_mem_raise_allocated(). syzbot reported [0] a null-ptr-deref in sk_get_rmem0() while using IPPROTO_UDPLITE (0x88): 14:25:52 executing program 1: r0 = socket$inet6(0xa, 0x80002, 0x88) We had a similar report [1] for probably sk_memory_allocated_add() in __sk_mem_raise_allocated(), and commit c915fe13cbaa ("udplite: fix NULL pointer dereference") fixed it by setting .memory_allocated for udplite_prot and udplitev6_prot. To fix the variant, we need to set either .sysctl_wmem_offset or .sysctl_rmem. Now UDP and UDPLITE share the same value for .memory_allocated, so we use the same .sysctl_wmem_offset for UDP and UDPLITE. 
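The offset-based lookup, reduced to a standalone sketch (the two structs below are hypothetical, trimmed-down stand-ins for the kernel's struct net and struct netns_ipv4): the proto stores only an offset, and readers such as sk_get_rmem0() add it to the per-namespace base pointer at runtime. When neither the offset nor the direct .sysctl_rmem pointer is populated, the fallback dereferences NULL, which is the crash below.

    #include <stdio.h>
    #include <stddef.h>

    struct netns_ipv4 {
        int sysctl_udp_wmem_min;
        int sysctl_udp_rmem_min;
    };

    struct net {
        struct netns_ipv4 ipv4;
    };

    int main(void)
    {
        struct net ns = { .ipv4 = { 4096, 4096 } };
        size_t off = offsetof(struct net, ipv4.sysctl_udp_rmem_min);

        /* what a .sysctl_rmem_offset reader does at runtime */
        int rmem_min = *(int *)((char *)&ns + off);

        printf("rmem_min=%d\n", rmem_min);  /* prints rmem_min=4096 */
        return 0;
    }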
[0]: general protection fault, probably for non-canonical address 0xdffffc0000000000: 0000 [#1] PREEMPT SMP KASAN KASAN: null-ptr-deref in range [0x0000000000000000-0x0000000000000007] CPU: 0 PID: 6829 Comm: syz-executor.1 Not tainted 6.4.0-rc2-syzkaller #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 04/28/2023 RIP: 0010:sk_get_rmem0 include/net/sock.h:2907 [inline] RIP: 0010:__sk_mem_raise_allocated+0x806/0x17a0 net/core/sock.c:3006 Code: c1 ea 03 80 3c 02 00 0f 85 23 0f 00 00 48 8b 44 24 08 48 8b 98 38 01 00 00 48 b8 00 00 00 00 00 fc ff df 48 89 da 48 c1 ea 03 <0f> b6 14 02 48 89 d8 83 e0 07 83 c0 03 38 d0 0f 8d 6f 0a 00 00 8b RSP: 0018:ffffc90005d7f450 EFLAGS: 00010246 RAX: dffffc0000000000 RBX: 0000000000000000 RCX: ffffc90004d92000 RDX: 0000000000000000 RSI: ffffffff88066482 RDI: ffffffff8e2ccbb8 RBP: ffff8880173f7000 R08: 0000000000000005 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000030000 R13: 0000000000000001 R14: 0000000000000340 R15: 0000000000000001 FS: 0000000000000000(0000) GS:ffff8880b9800000(0063) knlGS:00000000f7f1cb40 CS: 0010 DS: 002b ES: 002b CR0: 0000000080050033 CR2: 000000002e82f000 CR3: 0000000034ff0000 CR4: 00000000003506f0 Call Trace: __sk_mem_schedule+0x6c/0xe0 net/core/sock.c:3077 udp_rmem_schedule net/ipv4/udp.c:1539 [inline] __udp_enqueue_schedule_skb+0x776/0xb30 net/ipv4/udp.c:1581 __udpv6_queue_rcv_skb net/ipv6/udp.c:666 [inline] udpv6_queue_rcv_one_skb+0xc39/0x16c0 net/ipv6/udp.c:775 udpv6_queue_rcv_skb+0x194/0xa10 net/ipv6/udp.c:793 __udp6_lib_mcast_deliver net/ipv6/udp.c:906 [inline] __udp6_lib_rcv+0x1bda/0x2bd0 net/ipv6/udp.c:1013 ip6_protocol_deliver_rcu+0x2e7/0x1250 net/ipv6/ip6_input.c:437 ip6_input_finish+0x150/0x2f0 net/ipv6/ip6_input.c:482 NF_HOOK include/linux/netfilter.h:303 [inline] NF_HOOK include/linux/netfilter.h:297 [inline] ip6_input+0xa0/0xd0 net/ipv6/ip6_input.c:491 ip6_mc_input+0x40b/0xf50 net/ipv6/ip6_input.c:585 dst_input include/net/dst.h:468 [inline] ip6_rcv_finish net/ipv6/ip6_input.c:79 [inline] NF_HOOK include/linux/netfilter.h:303 [inline] NF_HOOK include/linux/netfilter.h:297 [inline] ipv6_rcv+0x250/0x380 net/ipv6/ip6_input.c:309 __netif_receive_skb_one_core+0x114/0x180 net/core/dev.c:5491 __netif_receive_skb+0x1f/0x1c0 net/core/dev.c:5605 netif_receive_skb_internal net/core/dev.c:5691 [inline] netif_receive_skb+0x133/0x7a0 net/core/dev.c:5750 tun_rx_batched+0x4b3/0x7a0 drivers/net/tun.c:1553 tun_get_user+0x2452/0x39c0 drivers/net/tun.c:1989 tun_chr_write_iter+0xdf/0x200 drivers/net/tun.c:2035 call_write_iter include/linux/fs.h:1868 [inline] new_sync_write fs/read_write.c:491 [inline] vfs_write+0x945/0xd50 fs/read_write.c:584 ksys_write+0x12b/0x250 fs/read_write.c:637 do_syscall_32_irqs_on arch/x86/entry/common.c:112 [inline] __do_fast_syscall_32+0x65/0xf0 arch/x86/entry/common.c:178 do_fast_syscall_32+0x33/0x70 arch/x86/entry/common.c:203 entry_SYSENTER_compat_after_hwframe+0x70/0x82 RIP: 0023:0xf7f21579 Code: b8 01 10 06 03 74 b4 01 10 07 03 74 b0 01 10 08 03 74 d8 01 00 00 00 00 00 00 00 00 00 00 00 00 00 51 52 55 89 e5 0f 34 cd 80 <5d> 5a 59 c3 90 90 90 90 8d b4 26 00 00 00 00 8d b4 26 00 00 00 00 RSP: 002b:00000000f7f1c590 EFLAGS: 00000282 ORIG_RAX: 0000000000000004 RAX: ffffffffffffffda RBX: 00000000000000c8 RCX: 0000000020000040 RDX: 0000000000000083 RSI: 00000000f734e000 RDI: 0000000000000000 RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000296 R12: 0000000000000000 R13: 0000000000000000 R14: 
0000000000000000 R15: 0000000000000000 Modules linked in: Link: https://lore.kernel.org/netdev/CANaxB-yCk8hhP68L4Q2nFOJht8sqgXGGQO2AftpHs0u1xyGG5A@mail.gmail.com/ [1] Fixes: 850cbaddb52d ("udp: use it's own memory accounting schema") Reported-by: syzbot+444ca0907e96f7c5e48b@syzkaller.appspotmail.com Closes: https://syzkaller.appspot.com/bug?extid=444ca0907e96f7c5e48b Signed-off-by: Kuniyuki Iwashima Link: https://lore.kernel.org/r/20230523163305.66466-1-kuniyu@amazon.com Signed-off-by: Paolo Abeni --- net/ipv4/udplite.c | 2 ++ net/ipv6/udplite.c | 2 ++ 2 files changed, 4 insertions(+) diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c index e0c9cc39b81e..56d94d23b9e0 100644 --- a/net/ipv4/udplite.c +++ b/net/ipv4/udplite.c @@ -64,6 +64,8 @@ struct proto udplite_prot = { .per_cpu_fw_alloc = &udp_memory_per_cpu_fw_alloc, .sysctl_mem = sysctl_udp_mem, + .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_udp_wmem_min), + .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min), .obj_size = sizeof(struct udp_sock), .h.udp_table = &udplite_table, }; diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c index 67eaf3ca14ce..3bab0cc13697 100644 --- a/net/ipv6/udplite.c +++ b/net/ipv6/udplite.c @@ -60,6 +60,8 @@ struct proto udplitev6_prot = { .per_cpu_fw_alloc = &udp_memory_per_cpu_fw_alloc, .sysctl_mem = sysctl_udp_mem, + .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_udp_wmem_min), + .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min), .obj_size = sizeof(struct udp6_sock), .h.udp_table = &udplite_table, };