author     Quentin Monnet <quentin.monnet@netronome.com>   2018-01-19 17:44:47 -0800
committer  David S. Miller <davem@davemloft.net>           2018-01-22 16:28:32 -0500
commit     8f0b425a712b82732127ff7880f92504f20fcc11 (patch)
tree       04cb71b3ced41ac9b303313d6f8844403b45d067 /net/sched
parent     631f65ff222211711715aad9a640fb5f954c81f5 (diff)
net: sched: add extack support for offload via tc_cls_common_offload
Add extack support for hardware offload of classifiers. In order to achieve this, a pointer to a struct netlink_ext_ack is added to the struct tc_cls_common_offload that is passed to the callback for setting up the classifier. Function tc_cls_common_offload_init() is updated to support initialization of this new attribute.

Signed-off-by: Quentin Monnet <quentin.monnet@netronome.com>
Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
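The structure and helper being extended live in include/net/pkt_cls.h, which is outside the 'net/sched' scope of this diffstat, so that hunk is not shown here. A minimal sketch of what the updated definitions plausibly look like, reconstructed from the call sites in the hunks below (the exact field layout is an assumption, not taken from this patch):

	/* Sketch reconstructed from the call sites below, not the actual
	 * include/net/pkt_cls.h hunk of this commit. */
	struct tc_cls_common_offload {
		u32 chain_index;
		__be16 protocol;
		u32 prio;
		struct netlink_ext_ack *extack;	/* new: extack handed to the offload callback */
	};

	static inline void
	tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
				   const struct tcf_proto *tp,
				   struct netlink_ext_ack *extack)
	{
		cls_common->chain_index = tp->chain->index;
		cls_common->protocol = tp->protocol;
		cls_common->prio = tp->prio;
		cls_common->extack = extack;	/* NULL on paths with no extack to propagate */
	}

As the hunks below show, the replace paths forward the extack they received from the netlink request, while the destroy and stats paths pass NULL, since no extack is plumbed to those call sites in this patch.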
Diffstat (limited to 'net/sched')
-rw-r--r--   net/sched/cls_bpf.c        4
-rw-r--r--   net/sched/cls_flower.c     6
-rw-r--r--   net/sched/cls_matchall.c   4
-rw-r--r--   net/sched/cls_u32.c        8
4 files changed, 11 insertions, 11 deletions
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index cd4194b1d5e4..c11e0fe23a17 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -159,7 +159,7 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
skip_sw = prog && tc_skip_sw(prog->gen_flags);
obj = prog ?: oldprog;
- tc_cls_common_offload_init(&cls_bpf.common, tp);
+ tc_cls_common_offload_init(&cls_bpf.common, tp, extack);
cls_bpf.command = TC_CLSBPF_OFFLOAD;
cls_bpf.exts = &obj->exts;
cls_bpf.prog = prog ? prog->filter : NULL;
@@ -227,7 +227,7 @@ static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
struct tcf_block *block = tp->chain->block;
struct tc_cls_bpf_offload cls_bpf = {};
- tc_cls_common_offload_init(&cls_bpf.common, tp);
+ tc_cls_common_offload_init(&cls_bpf.common, tp, NULL);
cls_bpf.command = TC_CLSBPF_STATS;
cls_bpf.exts = &prog->exts;
cls_bpf.prog = prog->filter;
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index f675a92e1b66..727c10378f37 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -223,7 +223,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
struct tc_cls_flower_offload cls_flower = {};
struct tcf_block *block = tp->chain->block;
- tc_cls_common_offload_init(&cls_flower.common, tp);
+ tc_cls_common_offload_init(&cls_flower.common, tp, NULL);
cls_flower.command = TC_CLSFLOWER_DESTROY;
cls_flower.cookie = (unsigned long) f;
@@ -243,7 +243,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
bool skip_sw = tc_skip_sw(f->flags);
int err;
- tc_cls_common_offload_init(&cls_flower.common, tp);
+ tc_cls_common_offload_init(&cls_flower.common, tp, extack);
cls_flower.command = TC_CLSFLOWER_REPLACE;
cls_flower.cookie = (unsigned long) f;
cls_flower.dissector = dissector;
@@ -272,7 +272,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
struct tc_cls_flower_offload cls_flower = {};
struct tcf_block *block = tp->chain->block;
- tc_cls_common_offload_init(&cls_flower.common, tp);
+ tc_cls_common_offload_init(&cls_flower.common, tp, NULL);
cls_flower.command = TC_CLSFLOWER_STATS;
cls_flower.cookie = (unsigned long) f;
cls_flower.exts = &f->exts;
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index b47929c15792..d990d2a52c6d 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -76,7 +76,7 @@ static void mall_destroy_hw_filter(struct tcf_proto *tp,
struct tc_cls_matchall_offload cls_mall = {};
struct tcf_block *block = tp->chain->block;
- tc_cls_common_offload_init(&cls_mall.common, tp);
+ tc_cls_common_offload_init(&cls_mall.common, tp, NULL);
cls_mall.command = TC_CLSMATCHALL_DESTROY;
cls_mall.cookie = cookie;
@@ -94,7 +94,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
bool skip_sw = tc_skip_sw(head->flags);
int err;
- tc_cls_common_offload_init(&cls_mall.common, tp);
+ tc_cls_common_offload_init(&cls_mall.common, tp, extack);
cls_mall.command = TC_CLSMATCHALL_REPLACE;
cls_mall.exts = &head->exts;
cls_mall.cookie = cookie;
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 0206c210e25b..7030240f8826 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -491,7 +491,7 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
struct tcf_block *block = tp->chain->block;
struct tc_cls_u32_offload cls_u32 = {};
- tc_cls_common_offload_init(&cls_u32.common, tp);
+ tc_cls_common_offload_init(&cls_u32.common, tp, NULL);
cls_u32.command = TC_CLSU32_DELETE_HNODE;
cls_u32.hnode.divisor = h->divisor;
cls_u32.hnode.handle = h->handle;
@@ -509,7 +509,7 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
bool offloaded = false;
int err;
- tc_cls_common_offload_init(&cls_u32.common, tp);
+ tc_cls_common_offload_init(&cls_u32.common, tp, extack);
cls_u32.command = TC_CLSU32_NEW_HNODE;
cls_u32.hnode.divisor = h->divisor;
cls_u32.hnode.handle = h->handle;
@@ -534,7 +534,7 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n)
struct tcf_block *block = tp->chain->block;
struct tc_cls_u32_offload cls_u32 = {};
- tc_cls_common_offload_init(&cls_u32.common, tp);
+ tc_cls_common_offload_init(&cls_u32.common, tp, NULL);
cls_u32.command = TC_CLSU32_DELETE_KNODE;
cls_u32.knode.handle = n->handle;
@@ -550,7 +550,7 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
bool skip_sw = tc_skip_sw(flags);
int err;
- tc_cls_common_offload_init(&cls_u32.common, tp);
+ tc_cls_common_offload_init(&cls_u32.common, tp, extack);
cls_u32.command = TC_CLSU32_REPLACE_KNODE;
cls_u32.knode.handle = n->handle;
cls_u32.knode.fshift = n->fshift;
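For context on how the new pointer is meant to be consumed: once the extack reaches the driver inside the common part of the tc_cls_*_offload structure, the offload callback can attach a human-readable reason when it rejects a filter instead of returning a bare error code. A hypothetical driver-side sketch (the function name and the "chain 0 only" restriction are illustrative, not part of this patch):

	#include <linux/netlink.h>
	#include <net/pkt_cls.h>

	/* Illustrative only: shows how a driver could report an offload
	 * failure through the extack carried in cls_flower->common. */
	static int example_setup_flower(struct tc_cls_flower_offload *cls_flower)
	{
		struct netlink_ext_ack *extack = cls_flower->common.extack;

		if (cls_flower->common.chain_index) {
			/* Message is returned to userspace over netlink
			 * alongside the error code. */
			NL_SET_ERR_MSG_MOD(extack, "Only chain 0 is supported for offload");
			return -EOPNOTSUPP;
		}

		return 0;
	}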