author	Eric Dumazet <edumazet@google.com>	2014-03-06 22:57:52 -0800
committer	David S. Miller <davem@davemloft.net>	2014-03-08 19:09:10 -0500
commit	2d8d40afd187bced0a3d056366fb58d66fe845e3 (patch)
tree	c42c299205f38d9a6ab275d53de74583f56e2119
parent	d85ea93ffb7e8bb6855d60c0901e4b6571689085 (diff)
pkt_sched: fq: do not hold qdisc lock while allocating memory
Resizing fq hash table allocates memory while holding qdisc spinlock,
with BH disabled.

This is definitely not good, as allocation might sleep.

We can drop the lock and get it when needed, we hold RTNL so no other
changes can happen at the same time.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Fixes: afe4fd062416 ("pkt_sched: fq: Fair Queue packet scheduler")
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	net/sched/sch_fq.c	21
1 file changed, 15 insertions(+), 6 deletions(-)
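The fix follows a common pattern: perform the allocation, which may sleep, before taking the lock; do only the rehash and pointer swap while the lock is held; and free the old memory after the lock is dropped. A minimal userspace sketch of that pattern follows; the struct table, table_lock, and table_resize() names and the pthread mutex standing in for the qdisc lock are illustrative assumptions, not taken from sch_fq.c.

/*
 * Sketch of the locking pattern used by this fix, assuming a
 * hypothetical "struct table" protected by a pthread mutex:
 * allocate before locking, swap pointers under the lock, free
 * only after unlocking.
 */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct table {
	size_t len;
	void **slots;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct table *cur_table;	/* protected by table_lock */

static int table_resize(size_t new_len)
{
	struct table *new_tbl, *old_tbl;

	/* The allocation may block, so do it before taking the lock. */
	new_tbl = calloc(1, sizeof(*new_tbl));
	if (!new_tbl)
		return -1;
	new_tbl->slots = calloc(new_len, sizeof(*new_tbl->slots));
	if (!new_tbl->slots) {
		free(new_tbl);
		return -1;
	}
	new_tbl->len = new_len;

	pthread_mutex_lock(&table_lock);
	old_tbl = cur_table;
	if (old_tbl)
		/* Plain copy stands in for the real rehash; assumes new_len >= old len. */
		memcpy(new_tbl->slots, old_tbl->slots,
		       old_tbl->len * sizeof(*old_tbl->slots));
	cur_table = new_tbl;
	pthread_mutex_unlock(&table_lock);

	/* Free the old table only after the lock has been dropped. */
	if (old_tbl) {
		free(old_tbl->slots);
		free(old_tbl);
	}
	return 0;
}

The same split is visible in the patch below: the new array is allocated before sch_tree_lock(), fq_rehash() and the fq_root swap happen under the lock, and fq_free(old_fq_root) runs after sch_tree_unlock().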
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 08ef7a42c0e4..21e251766eb1 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -601,6 +601,7 @@ static int fq_resize(struct Qdisc *sch, u32 log)
 {
 	struct fq_sched_data *q = qdisc_priv(sch);
 	struct rb_root *array;
+	void *old_fq_root;
 	u32 idx;
 
 	if (q->fq_root && log == q->fq_trees_log)
@@ -615,13 +616,19 @@ static int fq_resize(struct Qdisc *sch, u32 log)
 	for (idx = 0; idx < (1U << log); idx++)
 		array[idx] = RB_ROOT;
 
-	if (q->fq_root) {
-		fq_rehash(q, q->fq_root, q->fq_trees_log, array, log);
-		fq_free(q->fq_root);
-	}
+	sch_tree_lock(sch);
+
+	old_fq_root = q->fq_root;
+	if (old_fq_root)
+		fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);
+
 	q->fq_root = array;
 	q->fq_trees_log = log;
 
+	sch_tree_unlock(sch);
+
+	fq_free(old_fq_root);
+
 	return 0;
 }
 
@@ -697,9 +704,11 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
 		q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
 	}
 
-	if (!err)
+	if (!err) {
+		sch_tree_unlock(sch);
 		err = fq_resize(sch, fq_log);
-
+		sch_tree_lock(sch);
+	}
 	while (sch->q.qlen > sch->limit) {
 		struct sk_buff *skb = fq_dequeue(sch);