author | Vlad Buslov <vladbu@mellanox.com> | 2019-03-21 15:17:42 +0200
committer | David S. Miller <davem@davemloft.net> | 2019-03-21 14:32:17 -0700
commit | 3d81e7118d572f37456922929b2b289138b2174f
tree | 025029e937dd8f3d4ad6b38faa0594816ec95439 /net/sched
parent | 272ffaadeb3e739baa70aef7e92ad844b62b3304
net: sched: flower: protect flower classifier state with spinlock
struct tcf_proto was extended with a spinlock to be used by classifiers
instead of the global rtnl lock. Use it to protect shared flower classifier
data structures (handle_idr, mask hashtable and list) and the fields of
individual filters that can be accessed concurrently. This patch set uses
tcf_proto->lock as a per-instance lock that protects all filters on the
tcf_proto.
Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
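For readers unfamiliar with the pattern, here is a minimal userspace sketch of the per-instance locking scheme the commit message describes, using pthreads in place of the kernel spinlock API. All names here (proto_sketch, filter_del, and so on) are illustrative and not kernel API; the re-check of the deleted flag under the lock mirrors what __fl_delete() does in the diff below.

```c
/* Sketch only: per-instance lock instead of one global lock.
 * Names are invented for illustration, not kernel API.
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct filter_sketch {
	struct filter_sketch *next;
	unsigned long handle;
	bool deleted;			/* written only under the instance lock */
};

struct proto_sketch {
	pthread_spinlock_t lock;	/* stands in for tcf_proto->lock */
	struct filter_sketch *filters;	/* shared state guarded by lock */
};

/* Mirrors __fl_delete(): re-check 'deleted' under the lock so two
 * concurrent deleters cannot both succeed.
 */
static int filter_del(struct proto_sketch *tp, struct filter_sketch *f)
{
	pthread_spin_lock(&tp->lock);
	if (f->deleted) {
		pthread_spin_unlock(&tp->lock);
		return -ENOENT;
	}
	f->deleted = true;
	/* unlink from the shared list while still holding the lock */
	for (struct filter_sketch **p = &tp->filters; *p; p = &(*p)->next) {
		if (*p == f) {
			*p = f->next;
			break;
		}
	}
	pthread_spin_unlock(&tp->lock);
	return 0;
}

int main(void)
{
	struct proto_sketch tp = { .filters = NULL };
	struct filter_sketch f = { .handle = 1 };

	pthread_spin_init(&tp.lock, PTHREAD_PROCESS_PRIVATE);
	f.next = tp.filters;
	tp.filters = &f;

	printf("first delete:  %d\n", filter_del(&tp, &f)); /* 0 */
	printf("second delete: %d\n", filter_del(&tp, &f)); /* -ENOENT */
	return 0;
}
```

The important property is that the lock lives in the classifier instance, so concurrent updates to different tcf_proto instances no longer serialize on one global lock.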
Diffstat (limited to 'net/sched')
-rw-r--r-- | net/sched/cls_flower.c | 39
1 file changed, 32 insertions(+), 7 deletions(-)
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 25a4d64b82db..04210d645c78 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -384,7 +384,9 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
 	cls_flower.cookie = (unsigned long) f;
 
 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
+	spin_lock(&tp->lock);
 	tcf_block_offload_dec(block, &f->flags);
+	spin_unlock(&tp->lock);
 }
 
 static int fl_hw_replace_filter(struct tcf_proto *tp,
@@ -426,7 +428,9 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
 		return err;
 	} else if (err > 0) {
 		f->in_hw_count = err;
+		spin_lock(&tp->lock);
 		tcf_block_offload_inc(block, &f->flags);
+		spin_unlock(&tp->lock);
 	}
 
 	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
@@ -514,14 +518,19 @@ static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
 
 	*last = false;
 
-	if (f->deleted)
+	spin_lock(&tp->lock);
+	if (f->deleted) {
+		spin_unlock(&tp->lock);
 		return -ENOENT;
+	}
 
 	f->deleted = true;
 	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
 			       f->mask->filter_ht_params);
 	idr_remove(&head->handle_idr, f->handle);
 	list_del_rcu(&f->list);
+	spin_unlock(&tp->lock);
+
 	*last = fl_mask_put(head, f->mask, async);
 	if (!tc_skip_hw(f->flags))
 		fl_hw_destroy_filter(tp, f, extack);
@@ -1500,6 +1509,8 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 	if (!tc_in_hw(fnew->flags))
 		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
 
+	spin_lock(&tp->lock);
+
 	/* tp was deleted concurrently. -EAGAIN will cause caller to lookup
 	 * proto again or create new one, if necessary.
 	 */
@@ -1530,6 +1541,8 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 		list_replace_rcu(&fold->list, &fnew->list);
 		fold->deleted = true;
 
+		spin_unlock(&tp->lock);
+
 		fl_mask_put(head, fold->mask, true);
 		if (!tc_skip_hw(fold->flags))
 			fl_hw_destroy_filter(tp, fold, NULL);
@@ -1575,6 +1588,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 			goto errout_idr;
 
 		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
+		spin_unlock(&tp->lock);
 	}
 
 	*arg = fnew;
@@ -1586,6 +1600,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 errout_idr:
 	idr_remove(&head->handle_idr, fnew->handle);
 errout_hw:
+	spin_unlock(&tp->lock);
 	if (!tc_skip_hw(fnew->flags))
 		fl_hw_destroy_filter(tp, fnew, NULL);
 errout_mask:
@@ -1688,8 +1703,10 @@ static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
 				continue;
 			}
 
+			spin_lock(&tp->lock);
 			tc_cls_offload_cnt_update(block, &f->in_hw_count,
 						  &f->flags, add);
+			spin_unlock(&tp->lock);
 		}
 	}
 
@@ -2223,6 +2240,7 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
 	struct cls_fl_filter *f = fh;
 	struct nlattr *nest;
 	struct fl_flow_key *key, *mask;
+	bool skip_hw;
 
 	if (!f)
 		return skb->len;
@@ -2233,21 +2251,26 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
 	if (!nest)
 		goto nla_put_failure;
 
+	spin_lock(&tp->lock);
+
 	if (f->res.classid &&
 	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
-		goto nla_put_failure;
+		goto nla_put_failure_locked;
 
 	key = &f->key;
 	mask = &f->mask->key;
+	skip_hw = tc_skip_hw(f->flags);
 
 	if (fl_dump_key(skb, net, key, mask))
-		goto nla_put_failure;
-
-	if (!tc_skip_hw(f->flags))
-		fl_hw_update_stats(tp, f);
+		goto nla_put_failure_locked;
 
 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
-		goto nla_put_failure;
+		goto nla_put_failure_locked;
+
+	spin_unlock(&tp->lock);
+
+	if (!skip_hw)
+		fl_hw_update_stats(tp, f);
 
 	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
 		goto nla_put_failure;
@@ -2262,6 +2285,8 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
 
 	return skb->len;
 
+nla_put_failure_locked:
+	spin_unlock(&tp->lock);
 nla_put_failure:
 	nla_nest_cancel(skb, nest);
 	return -1;
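The fl_dump() hunk above uses a pattern worth calling out: snapshot the fields you need (skip_hw) while holding the spinlock, drop the lock, and only then call code that may block (the hardware stats update). Below is a minimal userspace sketch of that snapshot-then-unlock pattern; filter_state, slow_hw_stats_update and the flag value are invented for illustration, not kernel API.

```c
/* Sketch only: copy shared fields under the lock, do slow work unlocked. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define FLAG_SKIP_HW 0x1	/* illustrative stand-in for TCA_CLS_FLAGS_SKIP_HW */

struct filter_state {
	pthread_mutex_t lock;	/* stands in for tcf_proto->lock */
	unsigned int flags;
	unsigned int classid;
};

static void slow_hw_stats_update(void)
{
	/* stands in for a driver callback that may sleep */
	puts("updating hardware stats (lock not held)");
}

static void dump_filter(struct filter_state *f)
{
	unsigned int flags, classid;
	bool skip_hw;

	/* snapshot shared fields under the lock... */
	pthread_mutex_lock(&f->lock);
	flags = f->flags;
	classid = f->classid;
	skip_hw = flags & FLAG_SKIP_HW;
	pthread_mutex_unlock(&f->lock);

	/* ...and only then call anything slow or sleeping */
	if (!skip_hw)
		slow_hw_stats_update();

	printf("classid=%u flags=%#x\n", classid, flags);
}

int main(void)
{
	struct filter_state f = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.flags = 0,
		.classid = 42,
	};

	dump_filter(&f);
	return 0;
}
```

This is why the patch introduces the skip_hw local and the nla_put_failure_locked label: the spinlock must never be held across fl_hw_update_stats(), and every error path taken while locked needs an unlock before the shared nla_put_failure cleanup runs.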