author	Eric Dumazet <edumazet@google.com>	2016-12-02 08:11:00 -0800
committer	David S. Miller <davem@davemloft.net>	2016-12-03 16:12:17 -0500
commit	12efa1fa43968fcb707e806dc119df499c17ac2c (patch)
tree	1af0100515470e6aa6a942f0d13ff51529efbc81 /net/core
parent	e01b16a7e217a23eb97e08d0a591735d5aee5efc (diff)
net_sched: gen_estimator: account for timer drifts
Under heavy stress, the timers used in estimators tend to be slowly delayed by a few jiffies, leading to inaccuracies. Remember the last scheduled jiffies so that we get more precise estimations, without having to add a multiply/divide in the loop to account for the drift.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
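For illustration only (not part of the patch): a minimal userspace C sketch of the same drift-compensation pattern, using clock_nanosleep() with an absolute deadline in place of the kernel's mod_timer()/jiffies machinery. All names and the 250 ms period here are illustrative.

/* Illustrative sketch: keep an absolute "next" deadline and advance it by
 * the period each round, so a late wakeup does not shift every later one.
 * Mirrors next_jiffies += ((HZ/4) << idx) in the patch below.
 */
#include <stdio.h>
#include <time.h>

#define PERIOD_NS 250000000L	/* 250 ms, analogous to HZ/4 with idx == 0 */

static void advance(struct timespec *t, long ns)
{
	t->tv_nsec += ns;
	if (t->tv_nsec >= 1000000000L) {
		t->tv_nsec -= 1000000000L;
		t->tv_sec++;
	}
}

static int is_past(const struct timespec *deadline, const struct timespec *now)
{
	return now->tv_sec > deadline->tv_sec ||
	       (now->tv_sec == deadline->tv_sec &&
		now->tv_nsec >= deadline->tv_nsec);
}

int main(void)
{
	struct timespec next, now;

	clock_gettime(CLOCK_MONOTONIC, &next);
	for (int i = 0; i < 5; i++) {
		advance(&next, PERIOD_NS);	/* next_jiffies += period */
		clock_gettime(CLOCK_MONOTONIC, &now);
		if (is_past(&next, &now))	/* "Ouch... timer was delayed." */
			next = now;		/* resync, like jiffies + 1 */
		/* Sleep until the absolute deadline, not "now + period". */
		clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &next, NULL);
		printf("tick %d\n", i);
	}
	return 0;
}

The resync branch plays the role of the patch's time_after_eq() check: when the deadline is already in the past, it restarts near "now" instead of trying to catch up, which is what avoids adding a multiply/divide in the loop.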
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/gen_estimator.c | 25
1 file changed, 16 insertions(+), 9 deletions(-)
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index cad8e791f28e..0993844faeea 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -78,8 +78,7 @@
#define EST_MAX_INTERVAL 5
-struct gen_estimator
-{
+struct gen_estimator {
struct list_head list;
struct gnet_stats_basic_packed *bstats;
struct gnet_stats_rate_est64 *rate_est;
@@ -96,8 +95,8 @@ struct gen_estimator
struct rcu_head head;
};
-struct gen_estimator_head
-{
+struct gen_estimator_head {
+ unsigned long next_jiffies;
struct timer_list timer;
struct list_head list;
};
@@ -146,8 +145,15 @@ skip:
spin_unlock(e->stats_lock);
}
- if (!list_empty(&elist[idx].list))
- mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));
+ if (!list_empty(&elist[idx].list)) {
+ elist[idx].next_jiffies += ((HZ/4) << idx);
+
+ if (unlikely(time_after_eq(jiffies, elist[idx].next_jiffies))) {
+ /* Ouch... timer was delayed. */
+ elist[idx].next_jiffies = jiffies + 1;
+ }
+ mod_timer(&elist[idx].timer, elist[idx].next_jiffies);
+ }
rcu_read_unlock();
}
@@ -251,9 +257,10 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
setup_timer(&elist[idx].timer, est_timer, idx);
}
- if (list_empty(&elist[idx].list))
- mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));
-
+ if (list_empty(&elist[idx].list)) {
+ elist[idx].next_jiffies = jiffies + ((HZ/4) << idx);
+ mod_timer(&elist[idx].timer, elist[idx].next_jiffies);
+ }
list_add_rcu(&est->list, &elist[idx].list);
gen_add_node(est);
spin_unlock_bh(&est_tree_lock);