author | Vlad Buslov <vladbu@mellanox.com> | 2019-02-11 10:55:40 +0200 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2019-02-12 13:41:33 -0500 |
commit | fe2923afc12490e92237d23fc0b29f31da72a4f9 (patch) | |
tree | 5bcb097d9029d658b8a500bdb534eea13924edc9 /net/sched/cls_api.c | |
parent | 4dbfa766440c6dfe3d10f077cde966a7d11b58f1 (diff) | |
net: sched: traverse classifiers in chain with tcf_get_next_proto()
All users of chain->filter_chain rely on the rtnl lock and assume that no new
classifier instances are added while the list is being traversed. Use
tcf_get_next_proto() to traverse the filters list without relying on the rtnl
mutex. This function iterates over classifiers by taking a reference to the
current iterator classifier only and doesn't assume external synchronization
of the filters list.
Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
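From a caller's point of view, the conversion pattern described above is a simple cursor walk: start with a NULL cursor and keep feeding the previous tp back into tcf_get_next_proto(), which drops the reference on the previous classifier and takes one on the next. Below is a minimal, hypothetical caller sketch, not part of this patch (the name example_count_tps is invented); it assumes the declarations exported via include/net/pkt_cls.h at this point in the series, where tcf_get_next_proto() still takes only (chain, tp), and it is kernel code, so it is not buildable as a standalone program.

```c
#include <net/pkt_cls.h>

/* Hypothetical helper, modeled on tfilter_notify_chain() in this patch:
 * walk every classifier on a chain to completion.  Each call to
 * tcf_get_next_proto() puts the reference held on the previous tp and
 * gets one on the next, so no lock on chain->filter_chain is needed
 * while iterating, only tolerance to concurrent insertions/deletions.
 */
static unsigned int example_count_tps(struct tcf_chain *chain)
{
	struct tcf_proto *tp;
	unsigned int n = 0;

	for (tp = tcf_get_next_proto(chain, NULL); tp;
	     tp = tcf_get_next_proto(chain, tp))
		n++;	/* a reference to tp is held only for this iteration */

	return n;	/* loop ended on NULL, so no reference is left held */
}
```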
Diffstat (limited to 'net/sched/cls_api.c')
-rw-r--r-- | net/sched/cls_api.c | 70 |
1 file changed, 60 insertions(+), 10 deletions(-)
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 37c05b96898f..dca8a3bee9c2 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -980,6 +980,45 @@ tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
 }
 EXPORT_SYMBOL(tcf_get_next_chain);
 
+static struct tcf_proto *
+__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
+{
+	ASSERT_RTNL();
+	mutex_lock(&chain->filter_chain_lock);
+
+	if (!tp)
+		tp = tcf_chain_dereference(chain->filter_chain, chain);
+	else
+		tp = tcf_chain_dereference(tp->next, chain);
+
+	if (tp)
+		tcf_proto_get(tp);
+
+	mutex_unlock(&chain->filter_chain_lock);
+
+	return tp;
+}
+
+/* Function to be used by all clients that want to iterate over all tp's on
+ * chain. Users of this function must be tolerant to concurrent tp
+ * insertion/deletion or ensure that no concurrent chain modification is
+ * possible. Note that all netlink dump callbacks cannot guarantee to provide
+ * consistent dump because rtnl lock is released each time skb is filled with
+ * data and sent to user-space.
+ */
+
+struct tcf_proto *
+tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
+{
+	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);
+
+	if (tp)
+		tcf_proto_put(tp, NULL);
+
+	return tp_next;
+}
+EXPORT_SYMBOL(tcf_get_next_proto);
+
 static void tcf_block_flush_all_chains(struct tcf_block *block)
 {
 	struct tcf_chain *chain;
@@ -1352,7 +1391,7 @@ tcf_block_playback_offloads(struct tcf_block *block, tc_setup_cb_t *cb,
 			    struct netlink_ext_ack *extack)
 {
 	struct tcf_chain *chain, *chain_prev;
-	struct tcf_proto *tp;
+	struct tcf_proto *tp, *tp_prev;
 	int err;
 
 	for (chain = __tcf_get_next_chain(block, NULL);
@@ -1360,8 +1399,10 @@ tcf_block_playback_offloads(struct tcf_block *block, tc_setup_cb_t *cb,
 	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
-		for (tp = rtnl_dereference(chain->filter_chain); tp;
-		     tp = rtnl_dereference(tp->next)) {
+		for (tp = __tcf_get_next_proto(chain, NULL); tp;
+		     tp_prev = tp,
+			     tp = __tcf_get_next_proto(chain, tp),
+			     tcf_proto_put(tp_prev, NULL)) {
 			if (tp->ops->reoffload) {
 				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
@@ -1378,6 +1419,7 @@ tcf_block_playback_offloads(struct tcf_block *block, tc_setup_cb_t *cb,
 	return 0;
 
 err_playback_remove:
+	tcf_proto_put(tp, NULL);
 	tcf_chain_put(chain);
 	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
@@ -1677,8 +1719,8 @@ static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
 {
 	struct tcf_proto *tp;
 
-	for (tp = rtnl_dereference(chain->filter_chain);
-	     tp; tp = rtnl_dereference(tp->next))
+	for (tp = tcf_get_next_proto(chain, NULL);
+	     tp; tp = tcf_get_next_proto(chain, tp))
 		tfilter_notify(net, oskb, n, tp, block,
			       q, parent, NULL, event, false);
 }
@@ -2104,11 +2146,15 @@ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
 	struct net *net = sock_net(skb->sk);
 	struct tcf_block *block = chain->block;
 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
+	struct tcf_proto *tp, *tp_prev;
 	struct tcf_dump_args arg;
-	struct tcf_proto *tp;
 
-	for (tp = rtnl_dereference(chain->filter_chain);
-	     tp; tp = rtnl_dereference(tp->next), (*p_index)++) {
+	for (tp = __tcf_get_next_proto(chain, NULL);
+	     tp;
+	     tp_prev = tp,
+		     tp = __tcf_get_next_proto(chain, tp),
+		     tcf_proto_put(tp_prev, NULL),
+		     (*p_index)++) {
 		if (*p_index < index_start)
			continue;
 		if (TC_H_MAJ(tcm->tcm_info) &&
@@ -2125,7 +2171,7 @@ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
					  NETLINK_CB(cb->skb).portid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER) <= 0)
-				return false;
+				goto errout;
 
			cb->args[1] = 1;
		}
@@ -2145,9 +2191,13 @@ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
		cb->args[2] = arg.w.cookie;
		cb->args[1] = arg.w.count + 1;
		if (arg.w.stop)
-			return false;
+			goto errout;
 	}
 	return true;
+
+errout:
+	tcf_proto_put(tp, NULL);
+	return false;
 }
 
 /* called with RTNL */
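One detail worth noting from the hunks above: whenever a loop can exit early (the reoffload error path and the dump errout label), the reference taken on the current tp has to be dropped explicitly with tcf_proto_put(tp, NULL). The sketch below is illustrative only and mirrors the shape used by tcf_block_playback_offloads() and tcf_chain_dump(); as of this patch __tcf_get_next_proto() and tcf_proto_put() are local to net/sched/cls_api.c, so this pattern only applies to code in that file, and the visit callback and function name are invented for illustration.

```c
/* Illustrative early-exit iteration, modeled on this patch's own loops.
 * The for-loop increment expressions put the reference on the previous
 * tp only after the next one has been taken; on an early return the
 * reference still held on the current tp must be put by hand.
 */
static int example_visit_until_error(struct tcf_chain *chain,
				     int (*visit)(struct tcf_proto *tp))
{
	struct tcf_proto *tp, *tp_prev;
	int err;

	for (tp = __tcf_get_next_proto(chain, NULL); tp;
	     tp_prev = tp,
		     tp = __tcf_get_next_proto(chain, tp),
		     tcf_proto_put(tp_prev, NULL)) {
		err = visit(tp);
		if (err) {
			tcf_proto_put(tp, NULL);	/* drop ref held across the early exit */
			return err;
		}
	}

	return 0;	/* loop ended on NULL, no reference left held */
}
```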