net: tls: factor out tls_*crypt_async_wait()

[ Upstream commit c57ca512f3b68ddcd62bda9cc24a8f5584ab01b1 ]

Factor out waiting for async encrypt and decrypt to finish.
There are already multiple copies and a subsequent fix will
need more. No functional changes.

Note that crypto_wait_req() returns wait->err

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Reviewed-by: Simon Horman <horms@kernel.org>
Reviewed-by: Sabrina Dubroca <sd@queasysnail.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Stable-dep-of: aec7961916f3 ("tls: fix race between async notify and socket close")
Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in:
Jakub Kicinski 2024-02-06 17:18:18 -08:00 committed by Greg Kroah-Hartman
parent d55eb0b495
commit 2c6841c882

View File

@@ -229,6 +229,20 @@ static void tls_decrypt_done(crypto_completion_data_t *data, int err)
spin_unlock_bh(&ctx->decrypt_compl_lock); spin_unlock_bh(&ctx->decrypt_compl_lock);
} }
/* Wait for all in-flight async decrypt requests on @ctx to finish.
 *
 * The pending count is sampled under decrypt_compl_lock so the snapshot
 * is consistent with tls_decrypt_done() dropping the count on the
 * completion side; the completion is re-armed under the same lock,
 * before the pending check, so a wakeup cannot be missed.
 *
 * Note (per the commit message): crypto_wait_req() returns wait->err,
 * but the error is read back from ctx->async_wait either way so the
 * no-pending path reports it too.
 *
 * Returns 0 on success or the recorded async decrypt error.
 */
static int tls_decrypt_async_wait(struct tls_sw_context_rx *ctx)
{
	int pending;

	spin_lock_bh(&ctx->decrypt_compl_lock);
	reinit_completion(&ctx->async_wait.completion);
	pending = atomic_read(&ctx->decrypt_pending);
	spin_unlock_bh(&ctx->decrypt_compl_lock);

	/* Only sleep if decrypts were actually submitted and still pending */
	if (pending)
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);

	return ctx->async_wait.err;
}
static int tls_do_decryption(struct sock *sk, static int tls_do_decryption(struct sock *sk,
struct scatterlist *sgin, struct scatterlist *sgin,
struct scatterlist *sgout, struct scatterlist *sgout,
@@ -496,6 +510,28 @@ static void tls_encrypt_done(crypto_completion_data_t *data, int err)
schedule_delayed_work(&ctx->tx_work.work, 1); schedule_delayed_work(&ctx->tx_work.work, 1);
} }
/* Wait for all in-flight async encrypt requests on @ctx to finish.
 *
 * async_notify is raised under encrypt_compl_lock so a completion
 * racing with this snapshot sees that a waiter must be signalled
 * (see tls_encrypt_done()).  If nothing is pending, the completion is
 * instead re-armed for the next waiter.
 *
 * Returns 0 on success or the recorded async encrypt error.
 */
static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx)
{
	int pending;

	spin_lock_bh(&ctx->encrypt_compl_lock);
	/* Tell completions that a waiter wants the completion signalled */
	ctx->async_notify = true;
	pending = atomic_read(&ctx->encrypt_pending);
	spin_unlock_bh(&ctx->encrypt_compl_lock);

	if (pending)
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
	else
		reinit_completion(&ctx->async_wait.completion);

	/* There can be no concurrent accesses, since we have no
	 * pending encrypt operations
	 */
	WRITE_ONCE(ctx->async_notify, false);

	return ctx->async_wait.err;
}
static int tls_do_encryption(struct sock *sk, static int tls_do_encryption(struct sock *sk,
struct tls_context *tls_ctx, struct tls_context *tls_ctx,
struct tls_sw_context_tx *ctx, struct tls_sw_context_tx *ctx,
@@ -953,7 +989,6 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
int num_zc = 0; int num_zc = 0;
int orig_size; int orig_size;
int ret = 0; int ret = 0;
int pending;
if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
MSG_CMSG_COMPAT)) MSG_CMSG_COMPAT))
@@ -1122,24 +1157,12 @@ trim_sgl:
if (!num_async) { if (!num_async) {
goto send_end; goto send_end;
} else if (num_zc) { } else if (num_zc) {
int err;
/* Wait for pending encryptions to get completed */ /* Wait for pending encryptions to get completed */
spin_lock_bh(&ctx->encrypt_compl_lock); err = tls_encrypt_async_wait(ctx);
ctx->async_notify = true; if (err) {
ret = err;
pending = atomic_read(&ctx->encrypt_pending);
spin_unlock_bh(&ctx->encrypt_compl_lock);
if (pending)
crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
else
reinit_completion(&ctx->async_wait.completion);
/* There can be no concurrent accesses, since we have no
* pending encrypt operations
*/
WRITE_ONCE(ctx->async_notify, false);
if (ctx->async_wait.err) {
ret = ctx->async_wait.err;
copied = 0; copied = 0;
} }
} }
@@ -1171,7 +1194,6 @@ void tls_sw_splice_eof(struct socket *sock)
ssize_t copied = 0; ssize_t copied = 0;
bool retrying = false; bool retrying = false;
int ret = 0; int ret = 0;
int pending;
if (!ctx->open_rec) if (!ctx->open_rec)
return; return;
@@ -1203,22 +1225,7 @@ retry:
} }
/* Wait for pending encryptions to get completed */ /* Wait for pending encryptions to get completed */
spin_lock_bh(&ctx->encrypt_compl_lock); if (tls_encrypt_async_wait(ctx))
ctx->async_notify = true;
pending = atomic_read(&ctx->encrypt_pending);
spin_unlock_bh(&ctx->encrypt_compl_lock);
if (pending)
crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
else
reinit_completion(&ctx->async_wait.completion);
/* There can be no concurrent accesses, since we have no pending
* encrypt operations
*/
WRITE_ONCE(ctx->async_notify, false);
if (ctx->async_wait.err)
goto unlock; goto unlock;
/* Transmit if any encryptions have completed */ /* Transmit if any encryptions have completed */
@@ -2197,16 +2204,10 @@ put_on_rx_list:
recv_end: recv_end:
if (async) { if (async) {
int ret, pending; int ret;
/* Wait for all previously submitted records to be decrypted */ /* Wait for all previously submitted records to be decrypted */
spin_lock_bh(&ctx->decrypt_compl_lock); ret = tls_decrypt_async_wait(ctx);
reinit_completion(&ctx->async_wait.completion);
pending = atomic_read(&ctx->decrypt_pending);
spin_unlock_bh(&ctx->decrypt_compl_lock);
ret = 0;
if (pending)
ret = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
__skb_queue_purge(&ctx->async_hold); __skb_queue_purge(&ctx->async_hold);
if (ret) { if (ret) {
@@ -2425,16 +2426,9 @@ void tls_sw_release_resources_tx(struct sock *sk)
struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
struct tls_rec *rec, *tmp; struct tls_rec *rec, *tmp;
int pending;
/* Wait for any pending async encryptions to complete */ /* Wait for any pending async encryptions to complete */
spin_lock_bh(&ctx->encrypt_compl_lock); tls_encrypt_async_wait(ctx);
ctx->async_notify = true;
pending = atomic_read(&ctx->encrypt_pending);
spin_unlock_bh(&ctx->encrypt_compl_lock);
if (pending)
crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
tls_tx_records(sk, -1); tls_tx_records(sk, -1);