author | Cong Wang <xiyou.wangcong@gmail.com> | 2018-05-23 15:26:53 -0700
committer | David S. Miller <davem@davemloft.net> | 2018-05-24 22:56:15 -0400
commit | aaa908ffbee18a65529b716efb346a626e81559a (patch)
tree | 0ff6902780f9a964ac1b8fe8ea6a2bb7d7898cc7 /net/sched/cls_route.c
parent | 1bb58d2d3cbebeb2ee38d11e8fa86b06117f5f75 (diff)
net_sched: switch to rcu_work
Commit 05f0fe6b74db ("RCU, workqueue: Implement rcu_work") introduced
new APIs for dispatching work from an RCU callback. Switch the tc
filters over to these APIs, which gets rid of a good deal of
now-redundant code.
Cc: Tejun Heo <tj@kernel.org>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/cls_route.c')
-rw-r--r-- | net/sched/cls_route.c | 23
1 file changed, 9 insertions(+), 14 deletions(-)
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 21a03a8ee029..0404aa5fa7cb 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -57,10 +57,7 @@ struct route4_filter {
 	u32			handle;
 	struct route4_bucket	*bkt;
 	struct tcf_proto	*tp;
-	union {
-		struct work_struct	work;
-		struct rcu_head		rcu;
-	};
+	struct rcu_work		rwork;
 };
 
 #define ROUTE4_FAILURE ((struct route4_filter *)(-1L))
@@ -266,19 +263,17 @@ static void __route4_delete_filter(struct route4_filter *f)
 
 static void route4_delete_filter_work(struct work_struct *work)
 {
-	struct route4_filter *f = container_of(work, struct route4_filter, work);
-
+	struct route4_filter *f = container_of(to_rcu_work(work),
+					       struct route4_filter,
+					       rwork);
 	rtnl_lock();
 	__route4_delete_filter(f);
 	rtnl_unlock();
 }
 
-static void route4_delete_filter(struct rcu_head *head)
+static void route4_queue_work(struct route4_filter *f)
 {
-	struct route4_filter *f = container_of(head, struct route4_filter, rcu);
-
-	INIT_WORK(&f->work, route4_delete_filter_work);
-	tcf_queue_work(&f->work);
+	tcf_queue_work(&f->rwork, route4_delete_filter_work);
 }
 
 static void route4_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
@@ -304,7 +299,7 @@ static void route4_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
 				RCU_INIT_POINTER(b->ht[h2], next);
 				tcf_unbind_filter(tp, &f->res);
 				if (tcf_exts_get_net(&f->exts))
-					call_rcu(&f->rcu, route4_delete_filter);
+					route4_queue_work(f);
 				else
 					__route4_delete_filter(f);
 			}
@@ -349,7 +344,7 @@ static int route4_delete(struct tcf_proto *tp, void *arg, bool *last,
 			/* Delete it */
 			tcf_unbind_filter(tp, &f->res);
 			tcf_exts_get_net(&f->exts);
-			call_rcu(&f->rcu, route4_delete_filter);
+			tcf_queue_work(&f->rwork, route4_delete_filter_work);
 
 			/* Strip RTNL protected tree */
 			for (i = 0; i <= 32; i++) {
@@ -554,7 +549,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
 	if (fold) {
 		tcf_unbind_filter(tp, &fold->res);
 		tcf_exts_get_net(&fold->exts);
-		call_rcu(&fold->rcu, route4_delete_filter);
+		tcf_queue_work(&fold->rwork, route4_delete_filter_work);
 	}
 	return 0;
 
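For readers unfamiliar with the rcu_work API this patch adopts, the pattern in the hunks above boils down to the minimal sketch below. It is an illustration under stated assumptions, not the kernel code itself: struct foo_filter, foo_filter_free_work() and foo_filter_delete() are made-up names, and system_wq stands in for the dedicated tc filter workqueue that the real tcf_queue_work() helper uses internally.

/*
 * Minimal sketch of the rcu_work deferral pattern (hypothetical example,
 * not taken from the patch): embed a struct rcu_work in the object,
 * recover the object in the work handler via to_rcu_work(), and queue
 * the teardown with queue_rcu_work(), which waits for an RCU grace
 * period before scheduling the work item in process context.
 */
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

struct foo_filter {
	u32		handle;
	struct rcu_work	rwork;	/* replaces the old work_struct/rcu_head union */
};

/* Runs in process context, after an RCU grace period has elapsed. */
static void foo_filter_free_work(struct work_struct *work)
{
	struct foo_filter *f = container_of(to_rcu_work(work),
					    struct foo_filter, rwork);

	/* Safe to sleep or take locks here; the real code takes rtnl_lock(). */
	kfree(f);
}

/* Caller side: defer freeing until current RCU readers are done. */
static void foo_filter_delete(struct foo_filter *f)
{
	INIT_RCU_WORK(&f->rwork, foo_filter_free_work);
	queue_rcu_work(system_wq, &f->rwork);
}

Compared with the old scheme, this folds the two-stage dance of call_rcu() followed by INIT_WORK() + tcf_queue_work() inside the RCU callback into a single queueing call, and removes the union of rcu_head and work_struct from the filter struct, which is where most of the deleted lines come from.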