author:    Tung Nguyen <tung.q.nguyen@dektech.com.au>  2018-10-12 22:46:55 +0200
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2018-11-04 14:51:53 +0100
commit:    b3c2e8e106df1d271fb7f4e3d3def7f13a47ac74
tree:      a3140e7050ee2b9f75b52b4f0729ff5825ec5597 /net
parent:    721933262ef72a24595598ad6d20e98fc1b1ab82
tipc: fix unsafe rcu locking when accessing publication list
[ Upstream commit d3092b2efca1cd1d492d0b08499a2066c5ca8cec ]

The binding table's 'cluster_scope' list is rcu protected to handle
races between threads changing the list and those traversing the list
at the same moment. We have now found that the function
named_distribute() uses the regular list_for_each_entry() macro to
traverse the said list. Likewise, the function tipc_named_withdraw()
is removing items from the same list using the regular list_del()
call. When these two functions execute in parallel we see occasional
crashes. This commit fixes this by adding the missing _rcu() suffixes.

Signed-off-by: Tung Nguyen <tung.q.nguyen@dektech.com.au>
Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
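[ Editor's note: for readers less familiar with the kernel's RCU list
primitives, the pattern this fix restores looks roughly like the sketch
below. It is a minimal illustration, not taken from the TIPC sources:
the 'item' type, list name, and function names are hypothetical
stand-ins for 'publication' and 'cluster_scope'. ]

#include <linux/rculist.h>	/* list_del_rcu(), list_for_each_entry_rcu() */
#include <linux/slab.h>		/* kfree_rcu() */

/* Hypothetical element type; 'publication' plays this role in TIPC. */
struct item {
	int value;
	struct list_head node;
	struct rcu_head rcu;
};

static LIST_HEAD(item_list);

/* Writer side: unlink with the _rcu variant (writers are assumed to be
 * serialized against each other by an outer lock), then defer the free
 * past a grace period so concurrent readers can finish safely.
 */
static void item_remove(struct item *it)
{
	list_del_rcu(&it->node);
	kfree_rcu(it, rcu);
}

/* Reader side: the traversal must use the _rcu iterator and run inside
 * an RCU read-side critical section.
 */
static int item_sum(void)
{
	struct item *it;
	int sum = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(it, &item_list, node)
		sum += it->value;
	rcu_read_unlock();
	return sum;
}

[ The crash mode the commit describes comes from mixing the two worlds:
a plain list_del() poisons both link pointers of the removed entry, so
a reader still standing on that entry can crash when it follows ->next,
whereas list_del_rcu() leaves ->next intact and list_for_each_entry_rcu()
adds the read-side ordering needed to see a consistent list. ]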
Diffstat (limited to 'net')
-rw-r--r--  net/tipc/name_distr.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 51b4b96f89db..3cfeb9df64b0 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -115,7 +115,7 @@ struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ)
 	struct sk_buff *buf;
 	struct distr_item *item;
 
-	list_del(&publ->binding_node);
+	list_del_rcu(&publ->binding_node);
 
 	if (publ->scope == TIPC_NODE_SCOPE)
 		return NULL;
@@ -147,7 +147,7 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
 			ITEM_SIZE) * ITEM_SIZE;
 	u32 msg_rem = msg_dsz;
 
-	list_for_each_entry(publ, pls, binding_node) {
+	list_for_each_entry_rcu(publ, pls, binding_node) {
 		/* Prepare next buffer: */
 		if (!skb) {
 			skb = named_prepare_buf(net, PUBLICATION, msg_rem,
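[ Editor's note on the semantics the two hunks rely on:
list_for_each_entry_rcu() is only safe when the traversal runs inside
an RCU read-side critical section (rcu_read_lock()/rcu_read_unlock()),
presumably established by named_distribute()'s callers, and
list_del_rcu() still requires writers to be serialized against one
another by an outer lock. The added _rcu() suffixes make the
reader/writer interaction safe; they do nothing for writer/writer
races. ]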