From: Jakub Kicinski <kuba@kernel.org>
mainline inclusion
from mainline-v6.8-rc5
commit c57ca512f3b68ddcd62bda9cc24a8f5584ab01b1
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I92REK
CVE: CVE-2024-26583
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
Factor out waiting for async encrypt and decrypt to finish. There are already multiple copies and a subsequent fix will need more. No functional changes.
Note that crypto_wait_req() returns wait->err.
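For context, crypto_wait_req() in include/linux/crypto.h is roughly the following inline helper (a paraphrased sketch for reference only, not code touched by this patch):

	/* Paraphrased sketch of crypto_wait_req(); see include/linux/crypto.h. */
	static inline int crypto_wait_req(int err, struct crypto_wait *wait)
	{
		switch (err) {
		case -EINPROGRESS:
		case -EBUSY:
			/* sleep until the async callback completes the wait */
			wait_for_completion(&wait->completion);
			reinit_completion(&wait->completion);
			err = wait->err;
			break;
		}

		return err;
	}

So calling it with -EINPROGRESS always sleeps on the completion and then picks up wait->err, which is why the new tls_{en,de}crypt_async_wait() helpers can simply return ctx->async_wait.err after the wait.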
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Reviewed-by: Simon Horman <horms@kernel.org>
Reviewed-by: Sabrina Dubroca <sd@queasysnail.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Conflicts:
	net/tls/tls_sw.c
Signed-off-by: Ziyang Xuan <william.xuanziyang@huawei.com>
---
 net/tls/tls_sw.c | 89 ++++++++++++++++++++++++++----------------------
 1 file changed, 49 insertions(+), 40 deletions(-)
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index c6afd6b4467f..18b336e4cbbc 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -221,6 +221,20 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)
 	spin_unlock_bh(&ctx->decrypt_compl_lock);
 }
 
+static int tls_decrypt_async_wait(struct tls_sw_context_rx *ctx)
+{
+	int pending;
+
+	spin_lock_bh(&ctx->decrypt_compl_lock);
+	reinit_completion(&ctx->async_wait.completion);
+	pending = atomic_read(&ctx->decrypt_pending);
+	spin_unlock_bh(&ctx->decrypt_compl_lock);
+	if (pending)
+		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+
+	return ctx->async_wait.err;
+}
+
 static int tls_do_decryption(struct sock *sk,
 			     struct sk_buff *skb,
 			     struct scatterlist *sgin,
@@ -491,6 +505,28 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
 	schedule_delayed_work(&ctx->tx_work.work, 1);
 }
 
+static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx)
+{
+	int pending;
+
+	spin_lock_bh(&ctx->encrypt_compl_lock);
+	ctx->async_notify = true;
+
+	pending = atomic_read(&ctx->encrypt_pending);
+	spin_unlock_bh(&ctx->encrypt_compl_lock);
+	if (pending)
+		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+	else
+		reinit_completion(&ctx->async_wait.completion);
+
+	/* There can be no concurrent accesses, since we have no
+	 * pending encrypt operations
+	 */
+	WRITE_ONCE(ctx->async_notify, false);
+
+	return ctx->async_wait.err;
+}
+
 static int tls_do_encryption(struct sock *sk,
 			     struct tls_context *tls_ctx,
 			     struct tls_sw_context_tx *ctx,
@@ -947,7 +983,6 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 	int num_zc = 0;
 	int orig_size;
 	int ret = 0;
-	int pending;
 
 	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
 			       MSG_CMSG_COMPAT))
@@ -1116,24 +1151,12 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 	if (!num_async) {
 		goto send_end;
 	} else if (num_zc) {
-		/* Wait for pending encryptions to get completed */
-		spin_lock_bh(&ctx->encrypt_compl_lock);
-		ctx->async_notify = true;
-
-		pending = atomic_read(&ctx->encrypt_pending);
-		spin_unlock_bh(&ctx->encrypt_compl_lock);
-		if (pending)
-			crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
-		else
-			reinit_completion(&ctx->async_wait.completion);
-
-		/* There can be no concurrent accesses, since we have no
-		 * pending encrypt operations
-		 */
-		WRITE_ONCE(ctx->async_notify, false);
+		int err;
 
-		if (ctx->async_wait.err) {
-			ret = ctx->async_wait.err;
+		/* Wait for pending encryptions to get completed */
+		err = tls_encrypt_async_wait(ctx);
+		if (err) {
+			ret = err;
 			copied = 0;
 		}
 	}
@@ -1774,7 +1797,6 @@ int tls_sw_recvmsg(struct sock *sk,
 	bool is_peek = flags & MSG_PEEK;
 	bool bpf_strp_enabled;
 	int num_async = 0;
-	int pending;
 
 	flags |= nonblock;
 
@@ -1952,19 +1974,13 @@ int tls_sw_recvmsg(struct sock *sk,
 recv_end:
 	if (num_async) {
 		/* Wait for all previously submitted records to be decrypted */
-		spin_lock_bh(&ctx->decrypt_compl_lock);
-		reinit_completion(&ctx->async_wait.completion);
-		pending = atomic_read(&ctx->decrypt_pending);
-		spin_unlock_bh(&ctx->decrypt_compl_lock);
-		if (pending) {
-			err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
-			if (err) {
-				/* one of async decrypt failed */
-				tls_err_abort(sk, err);
-				copied = 0;
-				decrypted = 0;
-				goto end;
-			}
+		err = tls_decrypt_async_wait(ctx);
+		if (err) {
+			/* one of async decrypt failed */
+			tls_err_abort(sk, err);
+			copied = 0;
+			decrypted = 0;
+			goto end;
 		}
 
 		/* Drain records from the rx_list & copy if required */
@@ -2178,16 +2194,9 @@ void tls_sw_release_resources_tx(struct sock *sk)
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
 	struct tls_rec *rec, *tmp;
-	int pending;
 
 	/* Wait for any pending async encryptions to complete */
-	spin_lock_bh(&ctx->encrypt_compl_lock);
-	ctx->async_notify = true;
-	pending = atomic_read(&ctx->encrypt_pending);
-	spin_unlock_bh(&ctx->encrypt_compl_lock);
-
-	if (pending)
-		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+	tls_encrypt_async_wait(ctx);
 
 	tls_tx_records(sk, -1);