author     Eric Dumazet <edumazet@google.com>      2017-03-01 14:28:39 -0800
committer  David S. Miller <davem@davemloft.net>   2017-03-01 20:55:57 -0800
commit     13baa00ad01bb3a9f893e3a08cbc2d072fc0c15d
tree       ebcc0624dcccf6c25949f3e52c26a75f542bb7b8 /net
parent     540e2894f7905538740aaf122bd8e0548e1c34a4
net: net_enable_timestamp() can be called from irq contexts
It is now very clear that silly TCP listeners might toggle
timestamping on and off while new children are being added
to their accept queue.
This means net_enable_timestamp() can be called from BH context
while the static key is not currently enabled.
Let's play it safe and allow all contexts.
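For context, a minimal, hypothetical user-space sketch of the kind of
"silly listener" behaviour described above: a process that keeps flipping
SO_TIMESTAMP on a listening socket while remote peers connect, so that the
kernel may call net_enable_timestamp() from BH context while cloning child
sockets into the accept queue. None of this code is part of the patch; the
port and loop are purely illustrative, and error handling is omitted.

#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
	int one = 1, zero = 0;
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in addr = {
		.sin_family      = AF_INET,
		.sin_port        = htons(12345),	/* arbitrary port */
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};

	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	listen(fd, 128);

	/* While remote peers connect, children are cloned in softirq
	 * context and inherit the timestamping flag; keep toggling it. */
	for (;;) {
		setsockopt(fd, SOL_SOCKET, SO_TIMESTAMP, &one, sizeof(one));
		setsockopt(fd, SOL_SOCKET, SO_TIMESTAMP, &zero, sizeof(zero));
	}
	return 0;
}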
The work queue is scheduled only in the problematic cases, namely
the enable/disable transitions of the static key, so that critical
paths are not slowed down.
This extends and improves what we did in commit 5fa8bbda38c6 ("net: use
a work queue to defer net_disable_timestamp() work").
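To make the counting scheme concrete, here is a small stand-alone sketch
written with C11 atomics instead of the kernel's atomic_t/jump-label API.
The names enable(), disable(), run_deferred_work() and key_is_enabled are
made up for illustration; only the 0 <-> 1 transitions take the deferred
slow path, while every other change is a lock-free fast path that is safe
in any context.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int wanted;	/* plays the role of netstamp_wanted          */
static atomic_int deferred;	/* plays the role of netstamp_needed_deferred */
static bool key_is_enabled;	/* stand-in for the static key itself         */

static void schedule_deferred_work(void)
{
	/* Stand-in for schedule_work(&netstamp_work): in the kernel the
	 * expensive key flip runs later, from process context. */
}

void enable(void)		/* safe to call from any context */
{
	int w = atomic_load(&wanted);

	/* Fast path: the key is already wanted, just bump the count. */
	while (w > 0) {
		if (atomic_compare_exchange_weak(&wanted, &w, w + 1))
			return;
	}
	/* 0 -> 1 transition: defer the expensive key flip. */
	atomic_fetch_add(&deferred, 1);
	schedule_deferred_work();
}

void disable(void)
{
	int w = atomic_load(&wanted);

	/* Fast path: other users remain, just drop the count. */
	while (w > 1) {
		if (atomic_compare_exchange_weak(&wanted, &w, w - 1))
			return;
	}
	/* 1 -> 0 transition: defer the expensive key flip. */
	atomic_fetch_sub(&deferred, 1);
	schedule_deferred_work();
}

/* Deferred work: fold the pending transitions into the wanted count and
 * flip the key once, mirroring netstamp_clear() in the patch. */
void run_deferred_work(void)
{
	int pending = atomic_exchange(&deferred, 0);
	int w = atomic_fetch_add(&wanted, pending) + pending;

	key_is_enabled = (w > 0);
}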
Fixes: b90e5794c5bd ("net: dont call jump_label_dec from irq context")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: Dmitry Vyukov <dvyukov@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--  net/core/dev.c | 35
1 file changed, 31 insertions(+), 4 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index e63bf61b19be..8637b2b71f3d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1698,27 +1698,54 @@ EXPORT_SYMBOL_GPL(net_dec_egress_queue);
 static struct static_key netstamp_needed __read_mostly;
 #ifdef HAVE_JUMP_LABEL
 static atomic_t netstamp_needed_deferred;
+static atomic_t netstamp_wanted;
 static void netstamp_clear(struct work_struct *work)
 {
 	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
+	int wanted;

-	while (deferred--)
-		static_key_slow_dec(&netstamp_needed);
+	wanted = atomic_add_return(deferred, &netstamp_wanted);
+	if (wanted > 0)
+		static_key_enable(&netstamp_needed);
+	else
+		static_key_disable(&netstamp_needed);
 }
 static DECLARE_WORK(netstamp_work, netstamp_clear);
 #endif

 void net_enable_timestamp(void)
 {
+#ifdef HAVE_JUMP_LABEL
+	int wanted;
+
+	while (1) {
+		wanted = atomic_read(&netstamp_wanted);
+		if (wanted <= 0)
+			break;
+		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
+			return;
+	}
+	atomic_inc(&netstamp_needed_deferred);
+	schedule_work(&netstamp_work);
+#else
 	static_key_slow_inc(&netstamp_needed);
+#endif
 }
 EXPORT_SYMBOL(net_enable_timestamp);

 void net_disable_timestamp(void)
 {
 #ifdef HAVE_JUMP_LABEL
-	/* net_disable_timestamp() can be called from non process context */
-	atomic_inc(&netstamp_needed_deferred);
+	int wanted;
+
+	while (1) {
+		wanted = atomic_read(&netstamp_wanted);
+		if (wanted <= 1)
+			break;
+		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
+			return;
+	}
+	atomic_dec(&netstamp_needed_deferred);
 	schedule_work(&netstamp_work);
 #else
 	static_key_slow_dec(&netstamp_needed);