path: root/net/rds/send.c
author     Sowmini Varadhan <sowmini.varadhan@oracle.com>  2018-01-04 06:53:00 -0800
committer  David S. Miller <davem@davemloft.net>            2018-01-05 13:39:18 -0500
commit     3db6e0d172c94bd9953a1347c55ffb64b1d2e74f (patch)
tree       67f61aa4784da3fba56aa43e3626d3f3b6562c78 /net/rds/send.c
parent     c90ecbfaf50d2d7db25c531d9169be7e47435f3f (diff)
rds: use RCU to synchronize work-enqueue with connection teardown
rds_sendmsg() can enqueue work on cp_send_w from process context, but it should not enqueue this work if connection teardown has commenced (else we risk enqueuing work after rds_conn_path_destroy() has assumed that all work has been cancelled/flushed).

Similarly, some other functions like rds_cong_queue_updates() and rds_tcp_data_ready() are called in softirq context, and may end up enqueuing work on rds_wq after rds_conn_path_destroy() has assumed that all workqueues are quiesced.

Check the RDS_DESTROY_PENDING bit and use RCU synchronization to avoid all these races.

Signed-off-by: Sowmini Varadhan <sowmini.varadhan@oracle.com>
Acked-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
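The hunks below all instantiate the same enqueue-side pattern: take rcu_read_lock(), test RDS_DESTROY_PENDING, and only queue the delayed work if the flag is clear. What follows is a minimal sketch of both halves of that pattern, assuming the teardown path sets the flag and calls synchronize_rcu() before cancelling work, as the commit message describes; the helpers queue_send_work_sketch() and quiesce_path_sketch() are illustrative names, not functions from the RDS code.

    #include <linux/rcupdate.h>
    #include <linux/workqueue.h>
    #include "rds.h"    /* struct rds_conn_path, rds_wq, RDS_DESTROY_PENDING (assumed) */

    /* Enqueue side (process or softirq context): only schedule send work
     * while no teardown is pending.  The read-side critical section pairs
     * with the synchronize_rcu() in the teardown sketch below.
     */
    static void queue_send_work_sketch(struct rds_conn_path *cp)
    {
        rcu_read_lock();
        if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
            queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
        rcu_read_unlock();
    }

    /* Teardown side (sketch): mark the path as going away, wait for any
     * in-flight enqueue attempts to leave their read-side sections, then
     * cancel the work.  After this, no new cp_send_w work can appear.
     */
    static void quiesce_path_sketch(struct rds_conn_path *cp)
    {
        set_bit(RDS_DESTROY_PENDING, &cp->cp_flags);
        synchronize_rcu();
        cancel_delayed_work_sync(&cp->cp_send_w);
    }

With this ordering, any enqueue attempt either observes the flag and backs off, or finishes its read-side section before synchronize_rcu() returns, so rds_conn_path_destroy() can safely assume no new work will be queued behind its back.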
Diffstat (limited to 'net/rds/send.c')
-rw-r--r--  net/rds/send.c | 37
1 file changed, 32 insertions(+), 5 deletions(-)
diff --git a/net/rds/send.c b/net/rds/send.c
index f72466c63f0c..d3e32d1f3c7d 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -162,6 +162,12 @@ restart:
goto out;
}
+ if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
+ release_in_xmit(cp);
+ ret = -ENETUNREACH; /* dont requeue send work */
+ goto out;
+ }
+
/*
* we record the send generation after doing the xmit acquire.
* if someone else manages to jump in and do some work, we'll use
@@ -437,7 +443,12 @@ over_batch:
!list_empty(&cp->cp_send_queue)) && !raced) {
if (batch_count < send_batch_count)
goto restart;
- queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
+ rcu_read_lock();
+ if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+ ret = -ENETUNREACH;
+ else
+ queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
+ rcu_read_unlock();
} else if (raced) {
rds_stats_inc(s_send_lock_queue_raced);
}
@@ -1151,6 +1162,11 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
else
cpath = &conn->c_path[0];
+ if (test_bit(RDS_DESTROY_PENDING, &cpath->cp_flags)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
rds_conn_path_connect_if_down(cpath);
ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
@@ -1190,9 +1206,17 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
rds_stats_inc(s_send_queued);
ret = rds_send_xmit(cpath);
- if (ret == -ENOMEM || ret == -EAGAIN)
- queue_delayed_work(rds_wq, &cpath->cp_send_w, 1);
-
+ if (ret == -ENOMEM || ret == -EAGAIN) {
+ ret = 0;
+ rcu_read_lock();
+ if (test_bit(RDS_DESTROY_PENDING, &cpath->cp_flags))
+ ret = -ENETUNREACH;
+ else
+ queue_delayed_work(rds_wq, &cpath->cp_send_w, 1);
+ rcu_read_unlock();
+ }
+ if (ret)
+ goto out;
rds_message_put(rm);
return payload_len;
@@ -1270,7 +1294,10 @@ rds_send_probe(struct rds_conn_path *cp, __be16 sport,
rds_stats_inc(s_send_pong);
/* schedule the send work on rds_wq */
- queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
+ rcu_read_lock();
+ if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+ queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
+ rcu_read_unlock();
rds_message_put(rm);
return 0;