author    | Jiri Pirko <jiri@mellanox.com>        | 2017-10-19 15:50:46 +0200
committer | David S. Miller <davem@davemloft.net> | 2017-10-21 03:04:08 +0100
commit    | 8d26d5636dff9fca30816579910aaa9a55b4d96d (patch)
tree      | 3c82ff1aa167e07b81296a2d41cf6cfa3d2f7edd /net
parent    | 6b3eb752b4b9481868b3393f06a236a1aedfa43f (diff)
download  | linux-8d26d5636dff9fca30816579910aaa9a55b4d96d.tar.gz
          | linux-8d26d5636dff9fca30816579910aaa9a55b4d96d.tar.bz2
          | linux-8d26d5636dff9fca30816579910aaa9a55b4d96d.zip
net: sched: avoid ndo_setup_tc calls for TC_SETUP_CLS*
All drivers are converted to use block callbacks for TC_SETUP_CLS*,
so it is now safe to remove the calls to ndo_setup_tc from the cls_* classifiers.
Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
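For readers following the conversion, the driver-side pattern that makes the direct classifier calls unnecessary looks roughly like the sketch below. This is a minimal illustration, not code from this patch or from any in-tree driver: the foo_* names and struct foo_priv are hypothetical, and only the TC_SETUP_BLOCK handling, the tc_setup_cb_t callback shape and tcf_block_cb_register()/tcf_block_cb_unregister() are taken from the kernel APIs this series is built on.

```c
/* Sketch of the block-callback pattern drivers were converted to.
 * "foo" is a hypothetical driver; only the TC block APIs are real.
 */
#include <linux/netdevice.h>
#include <net/pkt_cls.h>

struct foo_priv {
        struct net_device *dev;
};

/* Per-block callback: receives the same TC_SETUP_CLS* requests that
 * previously arrived through ndo_setup_tc(). */
static int foo_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
                                 void *cb_priv)
{
        struct foo_priv *priv = cb_priv;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                /* program or remove the flow in hardware here */
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

/* Register/unregister the callback when a classifier block is bound
 * to the port. */
static int foo_setup_tc_block(struct foo_priv *priv,
                              struct tc_block_offload *f)
{
        if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;

        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block, foo_setup_tc_block_cb,
                                             priv, priv);
        case TC_BLOCK_UNBIND:
                tcf_block_cb_unregister(f->block, foo_setup_tc_block_cb, priv);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

/* ndo_setup_tc now only needs to understand TC_SETUP_BLOCK; the
 * per-classifier TC_SETUP_CLS* cases are gone, which is what lets
 * the cls_* code stop calling ndo_setup_tc directly. */
static int foo_ndo_setup_tc(struct net_device *dev, enum tc_setup_type type,
                            void *type_data)
{
        struct foo_priv *priv = netdev_priv(dev);

        switch (type) {
        case TC_SETUP_BLOCK:
                return foo_setup_tc_block(priv, type_data);
        default:
                return -EOPNOTSUPP;
        }
}
```

On the classifier side, the mirror image of this is visible in the hunks below: each cls_* offload helper now reaches the hardware only through tc_setup_cb_call() on its tcf_block, which dispatches to every callback registered at bind time, so the per-device tc_can_offload()/ndo_setup_tc path becomes dead code.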
Diffstat (limited to 'net')
-rw-r--r-- | net/dsa/slave.c          |  2
-rw-r--r-- | net/sched/cls_bpf.c      | 14
-rw-r--r-- | net/sched/cls_flower.c   | 20
-rw-r--r-- | net/sched/cls_matchall.c | 16
-rw-r--r-- | net/sched/cls_u32.c      | 31
5 files changed, 0 insertions, 83 deletions
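As an example of the end state, after this patch fl_hw_destroy_filter() in net/sched/cls_flower.c reduces to roughly the following. This is reconstructed from the context and removal lines of the hunk below, so read it as a sketch of the resulting helper rather than a verbatim copy of the file:

```c
static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
{
        struct tc_cls_flower_offload cls_flower = {};
        struct tcf_block *block = tp->chain->block;

        tc_cls_common_offload_init(&cls_flower.common, tp);
        cls_flower.command = TC_CLSFLOWER_DESTROY;
        cls_flower.cookie = (unsigned long) f;

        /* No tc_can_offload()/ndo_setup_tc any more: block callbacks
         * registered by drivers receive TC_SETUP_CLSFLOWER instead. */
        tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
                         &cls_flower, false);
}
```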
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 80142918d5d1..d0ae7010ea45 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -846,8 +846,6 @@ static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
                               void *type_data)
 {
         switch (type) {
-        case TC_SETUP_CLSMATCHALL:
-                return 0; /* will be removed after conversion from ndo */
         case TC_SETUP_BLOCK:
                 return dsa_slave_setup_tc_block(dev, type_data);
         default:
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index e379fdf928bd..0f8b51061c39 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -148,7 +148,6 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
                                enum tc_clsbpf_command cmd)
 {
         bool addorrep = cmd == TC_CLSBPF_ADD || cmd == TC_CLSBPF_REPLACE;
-        struct net_device *dev = tp->q->dev_queue->dev;
         struct tcf_block *block = tp->chain->block;
         bool skip_sw = tc_skip_sw(prog->gen_flags);
         struct tc_cls_bpf_offload cls_bpf = {};
@@ -162,19 +161,6 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
         cls_bpf.exts_integrated = prog->exts_integrated;
         cls_bpf.gen_flags = prog->gen_flags;
 
-        if (tc_can_offload(dev)) {
-                err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSBPF,
-                                                    &cls_bpf);
-                if (addorrep) {
-                        if (err) {
-                                if (skip_sw)
-                                        return err;
-                        } else {
-                                prog->gen_flags |= TCA_CLS_FLAGS_IN_HW;
-                        }
-                }
-        }
-
         err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, skip_sw);
         if (addorrep) {
                 if (err < 0) {
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 76b4e0a1c92f..16f58abaa697 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -200,16 +200,12 @@ static void fl_destroy_filter(struct rcu_head *head)
 static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
 {
         struct tc_cls_flower_offload cls_flower = {};
-        struct net_device *dev = tp->q->dev_queue->dev;
         struct tcf_block *block = tp->chain->block;
 
         tc_cls_common_offload_init(&cls_flower.common, tp);
         cls_flower.command = TC_CLSFLOWER_DESTROY;
         cls_flower.cookie = (unsigned long) f;
 
-        if (tc_can_offload(dev))
-                dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER,
-                                              &cls_flower);
         tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
                          &cls_flower, false);
 }
@@ -219,7 +215,6 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
                                 struct fl_flow_key *mask,
                                 struct cls_fl_filter *f)
 {
-        struct net_device *dev = tp->q->dev_queue->dev;
         struct tc_cls_flower_offload cls_flower = {};
         struct tcf_block *block = tp->chain->block;
         bool skip_sw = tc_skip_sw(f->flags);
@@ -233,17 +228,6 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
         cls_flower.key = &f->mkey;
         cls_flower.exts = &f->exts;
 
-        if (tc_can_offload(dev)) {
-                err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER,
-                                                    &cls_flower);
-                if (err) {
-                        if (skip_sw)
-                                return err;
-                } else {
-                        f->flags |= TCA_CLS_FLAGS_IN_HW;
-                }
-        }
-
         err = tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
                                &cls_flower, skip_sw);
         if (err < 0) {
@@ -262,7 +246,6 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
 static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
 {
         struct tc_cls_flower_offload cls_flower = {};
-        struct net_device *dev = tp->q->dev_queue->dev;
         struct tcf_block *block = tp->chain->block;
 
         tc_cls_common_offload_init(&cls_flower.common, tp);
@@ -270,9 +253,6 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
         cls_flower.cookie = (unsigned long) f;
         cls_flower.exts = &f->exts;
 
-        if (tc_can_offload(dev))
-                dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER,
-                                              &cls_flower);
         tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
                          &cls_flower, false);
 }
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 5278534c7e87..70e78d74f6d3 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -54,7 +54,6 @@ static void mall_destroy_hw_filter(struct tcf_proto *tp,
                                    struct cls_mall_head *head,
                                    unsigned long cookie)
 {
-        struct net_device *dev = tp->q->dev_queue->dev;
         struct tc_cls_matchall_offload cls_mall = {};
         struct tcf_block *block = tp->chain->block;
 
@@ -62,9 +61,6 @@ static void mall_destroy_hw_filter(struct tcf_proto *tp,
         cls_mall.command = TC_CLSMATCHALL_DESTROY;
         cls_mall.cookie = cookie;
 
-        if (tc_can_offload(dev))
-                dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSMATCHALL,
-                                              &cls_mall);
         tc_setup_cb_call(block, NULL, TC_SETUP_CLSMATCHALL,
                          &cls_mall, false);
 }
@@ -72,7 +68,6 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
                                   struct cls_mall_head *head,
                                   unsigned long cookie)
 {
-        struct net_device *dev = tp->q->dev_queue->dev;
         struct tc_cls_matchall_offload cls_mall = {};
         struct tcf_block *block = tp->chain->block;
         bool skip_sw = tc_skip_sw(head->flags);
@@ -83,17 +78,6 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
         cls_mall.exts = &head->exts;
         cls_mall.cookie = cookie;
 
-        if (tc_can_offload(dev)) {
-                err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSMATCHALL,
-                                                    &cls_mall);
-                if (err) {
-                        if (skip_sw)
-                                return err;
-                } else {
-                        head->flags |= TCA_CLS_FLAGS_IN_HW;
-                }
-        }
-
         err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSMATCHALL,
                                &cls_mall, skip_sw);
         if (err < 0) {
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index d53da7968eda..9ff17159fb61 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -464,7 +464,6 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
 
 static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
 {
-        struct net_device *dev = tp->q->dev_queue->dev;
         struct tcf_block *block = tp->chain->block;
         struct tc_cls_u32_offload cls_u32 = {};
 
@@ -474,15 +473,12 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
         cls_u32.hnode.handle = h->handle;
         cls_u32.hnode.prio = h->prio;
 
-        if (tc_can_offload(dev))
-                dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &cls_u32);
         tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false);
 }
 
 static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
                                 u32 flags)
 {
-        struct net_device *dev = tp->q->dev_queue->dev;
         struct tcf_block *block = tp->chain->block;
         struct tc_cls_u32_offload cls_u32 = {};
         bool skip_sw = tc_skip_sw(flags);
@@ -495,17 +491,6 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
         cls_u32.hnode.handle = h->handle;
         cls_u32.hnode.prio = h->prio;
 
-        if (tc_can_offload(dev)) {
-                err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32,
-                                                    &cls_u32);
-                if (err) {
-                        if (skip_sw)
-                                return err;
-                } else {
-                        offloaded = true;
-                }
-        }
-
         err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
         if (err < 0) {
                 u32_clear_hw_hnode(tp, h);
@@ -522,7 +507,6 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
 
 static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
 {
-        struct net_device *dev = tp->q->dev_queue->dev;
         struct tcf_block *block = tp->chain->block;
         struct tc_cls_u32_offload cls_u32 = {};
 
@@ -530,15 +514,12 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
         cls_u32.command = TC_CLSU32_DELETE_KNODE;
         cls_u32.knode.handle = handle;
 
-        if (tc_can_offload(dev))
-                dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &cls_u32);
         tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false);
 }
 
 static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
                                 u32 flags)
 {
-        struct net_device *dev = tp->q->dev_queue->dev;
         struct tcf_block *block = tp->chain->block;
         struct tc_cls_u32_offload cls_u32 = {};
         bool skip_sw = tc_skip_sw(flags);
@@ -560,18 +541,6 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
         if (n->ht_down)
                 cls_u32.knode.link_handle = n->ht_down->handle;
 
-
-        if (tc_can_offload(dev)) {
-                err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32,
-                                                    &cls_u32);
-                if (err) {
-                        if (skip_sw)
-                                return err;
-                } else {
-                        n->flags |= TCA_CLS_FLAGS_IN_HW;
-                }
-        }
-
         err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
         if (err < 0) {
                 u32_remove_hw_knode(tp, n->handle);