author:    Jakub Kicinski <jakub.kicinski@netronome.com>  2019-06-04 12:00:12 -0700
committer: David S. Miller <davem@davemloft.net>  2019-06-04 13:34:37 -0700
commit:    e52972c11d6b1262964db96d65934196db621685 (patch)
tree:      b082376e72b7935d16315fa9e1e9620fc8bca38b /net
parent:    27393f8c6efc03b8e0b64134721b0d337fca0a80 (diff)
download:  linux-e52972c11d6b1262964db96d65934196db621685.tar.gz
           linux-e52972c11d6b1262964db96d65934196db621685.tar.bz2
           linux-e52972c11d6b1262964db96d65934196db621685.zip
net/tls: replace the sleeping lock around RX resync with a bit lock
Commit 38030d7cb779 ("net/tls: avoid NULL-deref on resync during device removal") tried to fix a potential NULL-dereference by taking the context rwsem. Unfortunately the RX resync may get called from soft IRQ, so we can't use the rwsem to protect from the device disappearing.

Because we are guaranteed there can be only one resync at a time (it's called from strparser) use a bit to indicate resync is busy and make device removal wait for the bit to get cleared.

Note that there is a leftover "flags" field in struct tls_context already.

Fixes: 4799ac81e52a ("tls: Add rx inline crypto offload")
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
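For illustration, a minimal userspace sketch of the resync-side bit lock described above, written with C11 atomics. The names tls_sync_ctx, TLS_RX_SYNC_RUNNING_BIT and resync_cb are hypothetical stand-ins; the actual kernel code uses test_and_set_bit(), READ_ONCE() and clear_bit_unlock() on tls_ctx->flags and tls_ctx->netdev, as the diff below shows.

/*
 * Userspace approximation of the resync-side bit lock.
 * All names here are illustrative, not the kernel's.
 */
#include <stdatomic.h>
#include <stdbool.h>

#define TLS_RX_SYNC_RUNNING_BIT 0   /* stand-in for TLS_RX_SYNC_RUNNING */

struct tls_sync_ctx {
	_Atomic unsigned long flags;  /* stand-in for tls_ctx->flags */
	_Atomic(void *) netdev;       /* stand-in for tls_ctx->netdev */
};

/*
 * Resync path: may run from softirq, so it must never sleep.  There is
 * at most one resync at a time (it is called from strparser), hence the
 * bail-out if the bit is unexpectedly already set.
 */
static bool resync_with_device(struct tls_sync_ctx *ctx,
			       void (*resync_cb)(void *dev))
{
	unsigned long bit = 1UL << TLS_RX_SYNC_RUNNING_BIT;

	/* like test_and_set_bit(): full barrier, marks resync as busy */
	if (atomic_fetch_or(&ctx->flags, bit) & bit)
		return false;               /* unexpected: resync already running */

	/* like READ_ONCE(): the device may be torn down concurrently */
	void *dev = atomic_load(&ctx->netdev);
	if (dev)
		resync_cb(dev);             /* driver hook, cf. tls_dev_resync_rx() */

	/* like clear_bit_unlock(): release, lets device removal proceed */
	atomic_fetch_and_explicit(&ctx->flags, ~bit, memory_order_release);
	return true;
}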
Diffstat (limited to 'net')
-rw-r--r--  net/tls/tls_device.c | 27
1 file changed, 21 insertions(+), 6 deletions(-)
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 49b3a2ff8ef3..1f9cf57d9754 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -550,10 +550,22 @@ void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
	}
}

+static void tls_device_resync_rx(struct tls_context *tls_ctx,
+				 struct sock *sk, u32 seq, u64 rcd_sn)
+{
+	struct net_device *netdev;
+
+	if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags)))
+		return;
+	netdev = READ_ONCE(tls_ctx->netdev);
+	if (netdev)
+		netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk, seq, rcd_sn);
+	clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
+}
+
void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
-	struct net_device *netdev = tls_ctx->netdev;
	struct tls_offload_context_rx *rx_ctx;
	u32 is_req_pending;
	s64 resync_req;
@@ -568,10 +580,10 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
	is_req_pending = resync_req;

	if (unlikely(is_req_pending) && req_seq == seq &&
-	    atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
-		netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk,
-						      seq + TLS_HEADER_SIZE - 1,
-						      rcd_sn);
+	    atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0)) {
+		seq += TLS_HEADER_SIZE - 1;
+		tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
+	}
}
static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
@@ -972,7 +984,10 @@ static int tls_device_down(struct net_device *netdev)
		if (ctx->rx_conf == TLS_HW)
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_RX);
-		ctx->netdev = NULL;
+		WRITE_ONCE(ctx->netdev, NULL);
+		smp_mb__before_atomic(); /* pairs with test_and_set_bit() */
+		while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags))
+			usleep_range(10, 200);
		dev_put(netdev);
		list_del_init(&ctx->list);
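The removal side of the same pattern, again as a hedged userspace approximation rather than the kernel code: tls_device_down() publishes "no device" and then waits for any in-flight resync to drain before dropping the reference. device_going_down() is hypothetical and reuses the tls_sync_ctx type and TLS_RX_SYNC_RUNNING_BIT constant from the sketch above; nanosleep() stands in for usleep_range() and the fence mirrors smp_mb__before_atomic().

#include <stdatomic.h>
#include <time.h>

static void device_going_down(struct tls_sync_ctx *ctx)
{
	unsigned long bit = 1UL << TLS_RX_SYNC_RUNNING_BIT;
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 100 * 1000 }; /* ~100 us */

	/* like WRITE_ONCE(ctx->netdev, NULL): a concurrent resync either
	 * still sees the old device (and holds the busy bit) or sees NULL
	 * and skips the driver callback */
	atomic_store(&ctx->netdev, NULL);

	/* full fence, then poll the busy bit: don't release the device while
	 * a resync that read the old pointer is still inside the driver hook */
	atomic_thread_fence(memory_order_seq_cst);
	while (atomic_load(&ctx->flags) & bit)
		nanosleep(&ts, NULL);         /* stand-in for usleep_range(10, 200) */

	/* only now is it safe to drop the reference (dev_put() in the kernel) */
}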