author     Linus Torvalds <torvalds@linux-foundation.org>  2019-05-07 22:03:58 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-05-07 22:03:58 -0700
commit     80f232121b69cc69a31ccb2b38c1665d770b0710 (patch)
tree       106263eac4ff03b899df695e00dd11e593e74fe2 /net/sched/cls_matchall.c
parent     82efe439599439a5e1e225ce5740e6cfb777a7dd (diff)
parent     a9e41a529681b38087c91ebc0bb91e12f510ca2d (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
"Highlights:

1) Support AES128-CCM ciphers in kTLS, from Vakul Garg.

2) Add fib_sync_mem to control the amount of dirty memory we allow to queue up between synchronize RCU calls, from David Ahern.

3) Make flow classifier more lockless, from Vlad Buslov.

4) Add PHY downshift support to aquantia driver, from Heiner Kallweit.

5) Add SKB cache for TCP rx and tx, from Eric Dumazet. This reduces contention on SLAB spinlocks in heavy RPC workloads.

6) Partial GSO offload support in XFRM, from Boris Pismenny.

7) Add fast link down support to ethtool, from Heiner Kallweit.

8) Use siphash for IP ID generator, from Eric Dumazet.

9) Pull nexthops even further out from ipv4/ipv6 routes and FIB entries, from David Ahern.

10) Move skb->xmit_more into a per-cpu variable, from Florian Westphal.

11) Improve eBPF verifier speed and increase maximum program size, from Alexei Starovoitov.

12) Eliminate per-bucket spinlocks in rhashtable, and instead use bit spinlocks. From Neil Brown.

13) Allow tunneling with GUE encap in ipvs, from Jacky Hu.

14) Improve link partner cap detection in generic PHY code, from Heiner Kallweit.

15) Add layer 2 encap support to bpf_skb_adjust_room(), from Alan Maguire.

16) Remove SKB list implementation assumptions in SCTP, yours truly.

17) Various cleanups, optimizations, and simplifications in r8169 driver. From Heiner Kallweit.

18) Add memory accounting on TX and RX path of SCTP, from Xin Long.

19) Switch PHY drivers over to use dynamic feature detection, from Heiner Kallweit.

20) Support flow steering without masking in dpaa2-eth, from Ioana Ciocoi.

21) Implement ndo_get_devlink_port in netdevsim driver, from Jiri Pirko.

22) Increase the strict parsing of current and future netlink attributes, also export such policies to userspace. From Johannes Berg.

23) Allow DSA tag drivers to be modular, from Andrew Lunn.

24) Remove legacy DSA probing support, also from Andrew Lunn.

25) Allow ll_temac driver to be used on non-x86 platforms, from Esben Haabendal.

26) Add a generic tracepoint for TX queue timeouts to ease debugging, from Cong Wang.

27) More indirect call optimizations, from Paolo Abeni"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1763 commits)
  cxgb4: Fix error path in cxgb4_init_module
  net: phy: improve pause mode reporting in phy_print_status
  dt-bindings: net: Fix a typo in the phy-mode list for ethernet bindings
  net: macb: Change interrupt and napi enable order in open
  net: ll_temac: Improve error message on error IRQ
  net/sched: remove block pointer from common offload structure
  net: ethernet: support of_get_mac_address new ERR_PTR error
  net: usb: smsc: fix warning reported by kbuild test robot
  staging: octeon-ethernet: Fix of_get_mac_address ERR_PTR check
  net: dsa: support of_get_mac_address new ERR_PTR error
  net: dsa: sja1105: Fix status initialization in sja1105_get_ethtool_stats
  vrf: sit mtu should not be updated when vrf netdev is the link
  net: dsa: Fix error cleanup path in dsa_init_module
  l2tp: Fix possible NULL pointer dereference
  taprio: add null check on sched_nest to avoid potential null pointer dereference
  net: mvpp2: cls: fix less than zero check on a u32 variable
  net_sched: sch_fq: handle non connected flows
  net_sched: sch_fq: do not assume EDT packets are ordered
  net: hns3: use devm_kcalloc when allocating desc_cb
  net: hns3: some cleanup for struct hns3_enet_ring
  ...
Diffstat (limited to 'net/sched/cls_matchall.c')
-rw-r--r--  net/sched/cls_matchall.c | 64
1 file changed, 59 insertions(+), 5 deletions(-)
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index a13bc351a414..1e98a517fb0b 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -32,6 +32,9 @@ static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
{
struct cls_mall_head *head = rcu_dereference_bh(tp->root);
+ if (unlikely(!head))
+ return -1;
+
if (tc_skip_sw(head->flags))
return -1;
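
The added NULL check ties in with the lockless classifier work mentioned in highlight 3 above: once classification no longer serializes against filter updates, the only filter can be unpublished from tp->root while a softirq reader is still walking it under RCU. The fragment below is a generic sketch of that publish/unpublish pattern, not the actual cls_matchall teardown code; my_head/my_proto and their fields are placeholders.

/* Illustrative RCU pattern only; not taken from cls_matchall.c.
 * The writer clears the published pointer first and frees the object
 * after a grace period, so a concurrent reader may observe NULL. */
struct my_head {
	u32 flags;
	struct rcu_head rcu;
};

struct my_proto {
	struct my_head __rcu *root;
};

static int my_classify(struct my_proto *tp)
{
	/* caller runs in softirq context with rcu_read_lock_bh() held */
	struct my_head *head = rcu_dereference_bh(tp->root);

	if (unlikely(!head))		/* filter deleted concurrently */
		return -1;
	return head->flags ? -1 : 0;
}

static void my_delete(struct my_proto *tp, struct my_head *head)
{
	RCU_INIT_POINTER(tp->root, NULL);	/* unpublish first ... */
	kfree_rcu(head, rcu);			/* ... free after a grace period */
}
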
@@ -89,12 +92,29 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
bool skip_sw = tc_skip_sw(head->flags);
int err;
+ cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
+ if (!cls_mall.rule)
+ return -ENOMEM;
+
tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
cls_mall.command = TC_CLSMATCHALL_REPLACE;
- cls_mall.exts = &head->exts;
cls_mall.cookie = cookie;
+ err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts);
+ if (err) {
+ kfree(cls_mall.rule);
+ mall_destroy_hw_filter(tp, head, cookie, NULL);
+ if (skip_sw)
+ NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
+ else
+ err = 0;
+
+ return err;
+ }
+
err = tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, skip_sw);
+ kfree(cls_mall.rule);
+
if (err < 0) {
mall_destroy_hw_filter(tp, head, cookie, NULL);
return err;
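
This hunk is part of the switch from handing drivers a raw tcf_exts pointer (the removed cls_mall.exts line) to the flow_offload intermediate representation: the classifier allocates a struct flow_rule, translates the filter's actions into it with tc_setup_flow_action(), passes it to the block callbacks through tc_setup_cb_call(), and frees it immediately afterwards, so drivers must copy whatever they need during the callback. A rough sketch of how a driver's TC_SETUP_CLSMATCHALL handler might consume the new representation follows; the my_drv_* structure and helpers are hypothetical, only flow_action_for_each() and struct flow_action_entry come from the flow_offload API.

struct my_drv_priv;					/* hypothetical driver state */
static int my_drv_install_drop(struct my_drv_priv *priv, unsigned long cookie);
static void my_drv_remove_rule(struct my_drv_priv *priv, unsigned long cookie);

/* Hypothetical driver block callback after this change: the matchall
 * actions now arrive in cls_mall->rule->action instead of cls_mall->exts. */
static int my_drv_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				    void *cb_priv)
{
	struct tc_cls_matchall_offload *cls_mall = type_data;
	struct my_drv_priv *priv = cb_priv;
	struct flow_action_entry *act;
	int i;

	if (type != TC_SETUP_CLSMATCHALL)
		return -EOPNOTSUPP;

	switch (cls_mall->command) {
	case TC_CLSMATCHALL_REPLACE:
		/* cls_mall->rule is freed by the classifier right after this
		 * callback returns, so copy what is needed now. */
		flow_action_for_each(i, act, &cls_mall->rule->action) {
			if (act->id == FLOW_ACTION_DROP)
				return my_drv_install_drop(priv, cls_mall->cookie);
			return -EOPNOTSUPP;	/* only drop offload in this sketch */
		}
		return -EOPNOTSUPP;		/* no actions to offload */
	case TC_CLSMATCHALL_DESTROY:
		my_drv_remove_rule(priv, cls_mall->cookie);
		return 0;
	default:
		return -EOPNOTSUPP;		/* e.g. TC_CLSMATCHALL_STATS, see the stats sketch below */
	}
}
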
@@ -181,8 +201,8 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
if (head)
return -EEXIST;
- err = nla_parse_nested(tb, TCA_MATCHALL_MAX, tca[TCA_OPTIONS],
- mall_policy, NULL);
+ err = nla_parse_nested_deprecated(tb, TCA_MATCHALL_MAX,
+ tca[TCA_OPTIONS], mall_policy, NULL);
if (err < 0)
return err;
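
The rename to nla_parse_nested_deprecated() comes from the stricter netlink attribute validation mentioned in highlight 22: existing interfaces keep their old, lenient parsing under the _deprecated name, while new code is expected to use nla_parse_nested(), which enforces the policy strictly and reports problems through the extack. A minimal sketch of the strict variant is below; the TCA_MYCLS_* attributes and my_policy are made up for illustration.

/* Hypothetical nested attribute parse using the strict helper. */
enum {
	TCA_MYCLS_UNSPEC,
	TCA_MYCLS_CLASSID,
	TCA_MYCLS_FLAGS,
	__TCA_MYCLS_MAX,
};
#define TCA_MYCLS_MAX (__TCA_MYCLS_MAX - 1)

static const struct nla_policy my_policy[TCA_MYCLS_MAX + 1] = {
	[TCA_MYCLS_CLASSID]	= { .type = NLA_U32 },
	[TCA_MYCLS_FLAGS]	= { .type = NLA_U32 },
};

static int my_parse_opts(struct nlattr *opt, struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_MYCLS_MAX + 1];

	/* Unlike the _deprecated variant used for pre-existing interfaces,
	 * this rejects unknown attributes and malformed lengths. */
	return nla_parse_nested(tb, TCA_MYCLS_MAX, opt, my_policy, extack);
}
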
@@ -272,13 +292,27 @@ static int mall_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
if (tc_skip_hw(head->flags))
return 0;
+ cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
+ if (!cls_mall.rule)
+ return -ENOMEM;
+
tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
cls_mall.command = add ?
TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY;
- cls_mall.exts = &head->exts;
cls_mall.cookie = (unsigned long)head;
+ err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts);
+ if (err) {
+ kfree(cls_mall.rule);
+ if (add && tc_skip_sw(head->flags)) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
+ return err;
+ }
+ }
+
err = cb(TC_SETUP_CLSMATCHALL, &cls_mall, cb_priv);
+ kfree(cls_mall.rule);
+
if (err) {
if (add && tc_skip_sw(head->flags))
return err;
@@ -290,6 +324,23 @@ static int mall_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
return 0;
}
+static void mall_stats_hw_filter(struct tcf_proto *tp,
+ struct cls_mall_head *head,
+ unsigned long cookie)
+{
+ struct tc_cls_matchall_offload cls_mall = {};
+ struct tcf_block *block = tp->chain->block;
+
+ tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, NULL);
+ cls_mall.command = TC_CLSMATCHALL_STATS;
+ cls_mall.cookie = cookie;
+
+ tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false);
+
+ tcf_exts_stats_update(&head->exts, cls_mall.stats.bytes,
+ cls_mall.stats.pkts, cls_mall.stats.lastused);
+}
+
static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
@@ -301,9 +352,12 @@ static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
if (!head)
return skb->len;
+ if (!tc_skip_hw(head->flags))
+ mall_stats_hw_filter(tp, head, (unsigned long)head);
+
t->tcm_handle = head->handle;
- nest = nla_nest_start(skb, TCA_OPTIONS);
+ nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
if (!nest)
goto nla_put_failure;