author    Joe Lawrence <Joe.Lawrence@stratus.com>    2014-10-03 09:58:34 -0400
committer David S. Miller <davem@davemloft.net>      2014-10-04 20:50:50 -0400
commit    47549650abd13d873fd2e5fc218db19e21031074 (patch)
tree      28e33cbf979472878236ca81cd86771ae7ab9fa1
parent    34a419d4e20d6be5e0c4a3b27f6eface366a4836 (diff)
team: avoid race condition in scheduling delayed work
When team_notify_peers and team_mcast_rejoin are called, they both reset their respective .count_pending atomic variable. Then when the actual worker function is executed, the variable is atomically decremented. This pattern introduces a potential race condition where the .count_pending rolls over and the worker function keeps rescheduling until .count_pending decrements to zero again:

THREAD 1                           THREAD 2
========                           ========
team_notify_peers(teamX)
  atomic_set count_pending = 1
  schedule_delayed_work
                                   team_notify_peers(teamX)
                                     atomic_set count_pending = 1
team_notify_peers_work
  atomic_dec_and_test
    count_pending = 0
  (return)
                                   schedule_delayed_work
                                   team_notify_peers_work
                                   atomic_dec_and_test
                                     count_pending = -1
                                   schedule_delayed_work
                                   (repeat until count_pending = 0)

Instead of assigning a new value to .count_pending, use atomic_add to tack on the additional desired worker function invocations.

Signed-off-by: Joe Lawrence <joe.lawrence@stratus.com>
Acked-by: Jiri Pirko <jiri@resnulli.us>
Fixes: fc423ff00df3a19554414ee ("team: add peer notification")
Fixes: 492b200efdd20b8fcfdac87 ("team: add support for sending multicast rejoins")
Signed-off-by: David S. Miller <davem@davemloft.net>
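For illustration only (not part of the patch): a minimal user-space sketch of the same set-versus-add pattern, using C11 <stdatomic.h> in place of the kernel's atomic_t API. The names count_pending, request_racy(), request_fixed(), and worker() are invented for this sketch; only the overall pattern mirrors team.c.

/* Model of the delayed-work counter.  request_racy() overwrites the
 * counter the way atomic_set() did; request_fixed() accumulates the
 * way the patched atomic_add() does, so a concurrent decrement in the
 * worker can never be lost and drive the counter negative. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int count_pending;

static void request_racy(int count)
{
	atomic_store(&count_pending, count);      /* old atomic_set() pattern */
}

static void request_fixed(int count)
{
	atomic_fetch_add(&count_pending, count);  /* patched atomic_add() pattern */
}

/* Returns the new counter value; nonzero means "reschedule", mirroring
 * the atomic_dec_and_test() check in the worker functions. */
static int worker(void)
{
	return atomic_fetch_sub(&count_pending, 1) - 1;
}

int main(void)
{
	request_fixed(1);
	request_fixed(1);        /* second request stacks instead of overwriting */
	while (worker() != 0)
		;                /* worker reruns once more, then stops at zero */
	printf("pending now %d\n", atomic_load(&count_pending));
	return 0;
}

With request_racy() in place of request_fixed(), a worker running between the two requests would leave the counter at 1, and the next decrement after the overwrite could step through 0 to -1, reproducing the endless-reschedule loop shown in the thread diagram above.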
-rw-r--r--  drivers/net/team/team.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index ef10302ec936..1f76c2ea53f2 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -647,7 +647,7 @@ static void team_notify_peers(struct team *team)
{
if (!team->notify_peers.count || !netif_running(team->dev))
return;
- atomic_set(&team->notify_peers.count_pending, team->notify_peers.count);
+ atomic_add(team->notify_peers.count, &team->notify_peers.count_pending);
schedule_delayed_work(&team->notify_peers.dw, 0);
}
@@ -687,7 +687,7 @@ static void team_mcast_rejoin(struct team *team)
{
if (!team->mcast_rejoin.count || !netif_running(team->dev))
return;
- atomic_set(&team->mcast_rejoin.count_pending, team->mcast_rejoin.count);
+ atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending);
schedule_delayed_work(&team->mcast_rejoin.dw, 0);
}