author    Jakub Kicinski <kuba@kernel.org>    2024-02-06 17:18:20 -0800
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2024-02-23 09:51:26 +0100
commit    e327ed60bff4a991cd7a709c47c4f0c5b4a4fd57
tree      6f7915d6c25cf3f4de681da8c8beefd291a92c2d
parent    6209319b2efdd8524691187ee99c40637558fa33
tls: fix race between tx work scheduling and socket close
[ Upstream commit e01e3934a1b2d122919f73bc6ddbe1cdafc4bbdb ]

Similarly to previous commit, the submitting thread (recvmsg/sendmsg)
may exit as soon as the async crypto handler calls complete().
Reorder scheduling the work before calling complete().
This seems more logical in the first place, as it's
the inverse order of what the submitting thread will do.

Reported-by: valis <sec@valis.email>
Fixes: a42055e8d2c3 ("net/tls: Add support for async encryption of records for performance")
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Reviewed-by: Simon Horman <horms@kernel.org>
Reviewed-by: Sabrina Dubroca <sd@queasysnail.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
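The reordering described above is the whole fix: complete() is the point after which the submitting thread may stop waiting, return from recvmsg()/sendmsg(), and let the socket-close path free the TLS context, so the handler must not touch the context afterwards. Below is a minimal userspace C sketch of that ordering rule; it is not kernel code, and struct ctx, async_handler, and the semaphore standing in for complete() and the submitting thread's wait are illustrative assumptions.

/*
 * Minimal userspace analogue of the race (illustrative, not kernel
 * code): the waiter may tear down the shared context as soon as it
 * is signalled, so the signalling thread must finish every access
 * to that context *before* posting the completion -- the same
 * reordering the patch applies to tls_encrypt_done().
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdlib.h>

struct ctx {
        sem_t done;             /* stands in for ctx->async_wait.completion */
        int tx_scheduled;       /* stands in for BIT_TX_SCHEDULED */
};

static void *async_handler(void *data)  /* plays tls_encrypt_done() */
{
        struct ctx *c = data;

        /* Fixed order: do the follow-up work first... */
        c->tx_scheduled = 1;            /* schedule_delayed_work() */

        /* ...then signal the waiter.  After sem_post() returns, the
         * waiter may already be freeing c, so c must not be touched
         * again -- the old code did its scheduling here, after the
         * completion, which is the use-after-free window. */
        sem_post(&c->done);             /* complete() */
        return NULL;
}

int main(void)
{
        struct ctx *c = malloc(sizeof(*c));
        pthread_t t;

        if (!c)
                return 1;
        sem_init(&c->done, 0, 0);
        pthread_create(&t, NULL, async_handler, c);

        sem_wait(&c->done);     /* the submitting thread's wait */
        pthread_join(t, NULL);  /* the kernel has no such join; see below */
        sem_destroy(&c->done);
        free(c);                /* context gone; late accesses would be UAF */
        return 0;
}

Unlike the sketch, the kernel has no pthread_join() to fall back on: once complete() fires, the waiter may free the context immediately, which is why any post-completion access to ctx->tx_bitmask in the old code raced with the socket close.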
-rw-r--r--  net/tls/tls_sw.c | 16
1 file changed, 6 insertions, 10 deletions
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 635305bebfef..9374a61cef00 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -447,7 +447,6 @@ static void tls_encrypt_done(void *data, int err)
 	struct tls_rec *rec = data;
 	struct scatterlist *sge;
 	struct sk_msg *msg_en;
-	bool ready = false;
 	struct sock *sk;
 
 	msg_en = &rec->msg_encrypted;
@@ -483,19 +482,16 @@ static void tls_encrypt_done(void *data, int err)
 		/* If received record is at head of tx_list, schedule tx */
 		first_rec = list_first_entry(&ctx->tx_list,
 					     struct tls_rec, list);
-		if (rec == first_rec)
-			ready = true;
+		if (rec == first_rec) {
+			/* Schedule the transmission */
+			if (!test_and_set_bit(BIT_TX_SCHEDULED,
+					      &ctx->tx_bitmask))
+				schedule_delayed_work(&ctx->tx_work.work, 1);
+		}
 	}
 
 	if (atomic_dec_and_test(&ctx->encrypt_pending))
 		complete(&ctx->async_wait.completion);
-
-	if (!ready)
-		return;
-
-	/* Schedule the transmission */
-	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
-		schedule_delayed_work(&ctx->tx_work.work, 1);
 }
 
 static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx)