path: root/net/sched
author	Eric Dumazet <edumazet@google.com>	2016-06-10 16:41:39 -0700
committer	David S. Miller <davem@davemloft.net>	2016-06-10 23:58:21 -0700
commit	45f50bed1d808794e514e9eed0e579a8756ce2ba (patch)
tree	05d9f30419d6a5ef9b503487661959ef29c2fec5 /net/sched
parent	42117927cab5a13192ecc227bea19da5059ffc6c (diff)
net_sched: remove generic throttled management
__QDISC_STATE_THROTTLED bit manipulation is rather expensive for HTB and a few others.

I already removed it for sch_fq in commit f2600cf02b5b ("net: sched: avoid costly atomic operation in fq_dequeue()") and so far nobody has complained.

When one or more packets are stuck in one or more throttled HTB classes, an htb dequeue() performs two atomic operations to clear/set the __QDISC_STATE_THROTTLED bit while the root qdisc lock is held.

Removing this pair of atomic operations brings me an 8 % performance increase on 200 TCP_RR tests in the presence of throttled classes.

This patch has no side effect, since nothing actually uses qdisc_is_throttled() anymore.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
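For illustration, a minimal, hypothetical dequeue path showing the caller-side effect of this change: the "toy_*" names and the next_event field are invented for this sketch, while qdisc_priv(), qdisc_bstats_update() and the new two-argument qdisc_watchdog_schedule_ns() are the real helpers used or modified by this patch. The removed qdisc_throttled()/qdisc_unthrottled() calls appear only in comments.

/*
 * Hedged sketch, not part of the patch: how a qdisc's dequeue() arms its
 * watchdog after this change, with no throttled-bit bookkeeping left.
 */
#include <net/sch_generic.h>
#include <net/pkt_sched.h>

struct toy_sched_data {
	struct qdisc_watchdog watchdog;	/* hrtimer-based wakeup */
	u64 next_event;			/* earliest release time, in ns */
};

static struct sk_buff *toy_dequeue(struct Qdisc *sch)
{
	struct toy_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = NULL;	/* would be picked from an eligible class */

	if (skb) {
		qdisc_bstats_update(sch, skb);
		sch->q.qlen--;
		/* before this patch: qdisc_unthrottled(sch); (atomic clear_bit) */
		return skb;
	}

	/*
	 * All classes are rate-limited: arm the watchdog for the next event.
	 * Before this patch the call took a third argument,
	 * qdisc_watchdog_schedule_ns(&q->watchdog, q->next_event, true),
	 * which performed an atomic set_bit via qdisc_throttled().
	 */
	qdisc_watchdog_schedule_ns(&q->watchdog, q->next_event);
	return NULL;
}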
Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/sch_api.c    7
-rw-r--r--  net/sched/sch_cbq.c    2
-rw-r--r--  net/sched/sch_fq.c     3
-rw-r--r--  net/sched/sch_hfsc.c   1
-rw-r--r--  net/sched/sch_htb.c    3
-rw-r--r--  net/sched/sch_netem.c  1
-rw-r--r--  net/sched/sch_tbf.c    4
7 files changed, 4 insertions(+), 17 deletions(-)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index d4a8bbfcc953..401eda6de682 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -583,7 +583,6 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
timer);
rcu_read_lock();
- qdisc_unthrottled(wd->qdisc);
__netif_schedule(qdisc_root(wd->qdisc));
rcu_read_unlock();
@@ -598,15 +597,12 @@ void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
}
EXPORT_SYMBOL(qdisc_watchdog_init);
-void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle)
+void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
{
if (test_bit(__QDISC_STATE_DEACTIVATED,
&qdisc_root_sleeping(wd->qdisc)->state))
return;
- if (throttle)
- qdisc_throttled(wd->qdisc);
-
if (wd->last_expires == expires)
return;
@@ -620,7 +616,6 @@ EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);
void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
hrtimer_cancel(&wd->timer);
- qdisc_unthrottled(wd->qdisc);
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 6e61f9aa8783..a29fd811d7b9 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -513,7 +513,6 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED);
}
- qdisc_unthrottled(sch);
__netif_schedule(qdisc_root(sch));
return HRTIMER_NORESTART;
}
@@ -819,7 +818,6 @@ cbq_dequeue(struct Qdisc *sch)
if (skb) {
qdisc_bstats_update(sch, skb);
sch->q.qlen--;
- qdisc_unthrottled(sch);
return skb;
}
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 3c6a47d66a04..f49c81e91acd 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -445,8 +445,7 @@ begin:
if (!head->first) {
if (q->time_next_delayed_flow != ~0ULL)
qdisc_watchdog_schedule_ns(&q->watchdog,
- q->time_next_delayed_flow,
- false);
+ q->time_next_delayed_flow);
return NULL;
}
}
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index eb3d3f5aba80..bd08c363a26d 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1664,7 +1664,6 @@ hfsc_dequeue(struct Qdisc *sch)
set_passive(cl);
}
- qdisc_unthrottled(sch);
qdisc_bstats_update(sch, skb);
qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index b74d06668ab4..07dcd2933f01 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -889,7 +889,6 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
if (skb != NULL) {
ok:
qdisc_bstats_update(sch, skb);
- qdisc_unthrottled(sch);
qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
return skb;
@@ -929,7 +928,7 @@ ok:
}
qdisc_qstats_overlimit(sch);
if (likely(next_event > q->now))
- qdisc_watchdog_schedule_ns(&q->watchdog, next_event, true);
+ qdisc_watchdog_schedule_ns(&q->watchdog, next_event);
else
schedule_work(&q->work);
fin:
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 2dbe732ca135..876df13c745a 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -587,7 +587,6 @@ tfifo_dequeue:
if (skb) {
qdisc_qstats_backlog_dec(sch, skb);
deliver:
- qdisc_unthrottled(sch);
qdisc_bstats_update(sch, skb);
return skb;
}
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 7fa3d6e1291c..c12df84d1078 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -254,14 +254,12 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
q->ptokens = ptoks;
qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
- qdisc_unthrottled(sch);
qdisc_bstats_update(sch, skb);
return skb;
}
qdisc_watchdog_schedule_ns(&q->watchdog,
- now + max_t(long, -toks, -ptoks),
- true);
+ now + max_t(long, -toks, -ptoks));
/* Maybe we have a shorter packet in the queue,
which can be sent now. It sounds cool,