Diffstat (limited to 'net/sched')

-rw-r--r--  net/sched/act_ct.c      |  9
-rw-r--r--  net/sched/act_pedit.c   | 48
-rw-r--r--  net/sched/act_police.c  | 10
-rw-r--r--  net/sched/cls_api.c     | 15
-rw-r--r--  net/sched/cls_flower.c  |  3
-rw-r--r--  net/sched/cls_u32.c     | 18
-rw-r--r--  net/sched/sch_api.c     | 88
-rw-r--r--  net/sched/sch_fq_pie.c  | 10
-rw-r--r--  net/sched/sch_generic.c | 44
-rw-r--r--  net/sched/sch_ingress.c | 16
-rw-r--r--  net/sched/sch_mq.c      |  8
-rw-r--r--  net/sched/sch_mqprio.c  |  8
-rw-r--r--  net/sched/sch_pie.c     |  5
-rw-r--r--  net/sched/sch_red.c     |  5
-rw-r--r--  net/sched/sch_sfq.c     |  5
-rw-r--r--  net/sched/sch_taprio.c  |  9
-rw-r--r--  net/sched/sch_teql.c    |  2

17 files changed, 217 insertions(+), 86 deletions(-)
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 9cc0bc7c71ed..abc71a06d634 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -610,6 +610,7 @@ static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
struct flow_offload_tuple tuple = {};
enum ip_conntrack_info ctinfo;
struct tcphdr *tcph = NULL;
+ bool force_refresh = false;
struct flow_offload *flow;
struct nf_conn *ct;
u8 dir;
@@ -647,6 +648,7 @@ static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
* established state, then don't refresh.
*/
return false;
+ force_refresh = true;
}
if (tcph && (unlikely(tcph->fin || tcph->rst))) {
@@ -660,7 +662,12 @@ static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
else
ctinfo = IP_CT_ESTABLISHED_REPLY;
- flow_offload_refresh(nf_ft, flow);
+ flow_offload_refresh(nf_ft, flow, force_refresh);
+ if (!test_bit(IPS_ASSURED_BIT, &ct->status)) {
+ /* Process this flow in SW to allow promoting to ASSURED */
+ return false;
+ }
+
nf_conntrack_get(&ct->ct_general);
nf_ct_set(skb, ct, ctinfo);
if (nf_ft->flags & NF_FLOWTABLE_COUNTER)
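Note: the extra argument to flow_offload_refresh() above implies a matching signature change on the netfilter side, outside this diffstat. The intent is a force flag that bypasses the usual rate-limiting of timeout updates, so packets of not-yet-ASSURED connections keep the flowtable entry fresh while still being sent through the software path for promotion. A minimal sketch of what that counterpart plausibly looks like (the body here is an assumption, not part of this diff):

    /* sketch: assumed counterpart in net/netfilter/nf_flow_table_core.c */
    void flow_offload_refresh(struct nf_flowtable *flow_table,
                              struct flow_offload *flow, bool force)
    {
            u32 timeout;

            timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
            if (force || timeout - READ_ONCE(flow->timeout) > HZ)
                    WRITE_ONCE(flow->timeout, timeout);
    }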
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index fc945c7e4123..c819b812a899 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -13,7 +13,10 @@
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
#include <linux/slab.h>
+#include <net/ipv6.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <linux/tc_act/tc_pedit.h>
@@ -327,28 +330,58 @@ static bool offset_valid(struct sk_buff *skb, int offset)
return true;
}
-static void pedit_skb_hdr_offset(struct sk_buff *skb,
+static int pedit_l4_skb_offset(struct sk_buff *skb, int *hoffset, const int header_type)
+{
+ const int noff = skb_network_offset(skb);
+ int ret = -EINVAL;
+ struct iphdr _iph;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP): {
+ const struct iphdr *iph = skb_header_pointer(skb, noff, sizeof(_iph), &_iph);
+
+ if (!iph)
+ goto out;
+ *hoffset = noff + iph->ihl * 4;
+ ret = 0;
+ break;
+ }
+ case htons(ETH_P_IPV6):
+ ret = ipv6_find_hdr(skb, hoffset, header_type, NULL, NULL) == header_type ? 0 : -EINVAL;
+ break;
+ }
+out:
+ return ret;
+}
+
+static int pedit_skb_hdr_offset(struct sk_buff *skb,
enum pedit_header_type htype, int *hoffset)
{
+ int ret = -EINVAL;
/* 'htype' is validated in the netlink parsing */
switch (htype) {
case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
- if (skb_mac_header_was_set(skb))
+ if (skb_mac_header_was_set(skb)) {
*hoffset = skb_mac_offset(skb);
+ ret = 0;
+ }
break;
case TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK:
case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
*hoffset = skb_network_offset(skb);
+ ret = 0;
break;
case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
+ ret = pedit_l4_skb_offset(skb, hoffset, IPPROTO_TCP);
+ break;
case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
- if (skb_transport_header_was_set(skb))
- *hoffset = skb_transport_offset(skb);
+ ret = pedit_l4_skb_offset(skb, hoffset, IPPROTO_UDP);
break;
default:
break;
}
+ return ret;
}
TC_INDIRECT_SCOPE int tcf_pedit_act(struct sk_buff *skb,
@@ -384,6 +417,7 @@ TC_INDIRECT_SCOPE int tcf_pedit_act(struct sk_buff *skb,
int hoffset = 0;
u32 *ptr, hdata;
u32 val;
+ int rc;
if (tkey_ex) {
htype = tkey_ex->htype;
@@ -392,7 +426,11 @@ TC_INDIRECT_SCOPE int tcf_pedit_act(struct sk_buff *skb,
tkey_ex++;
}
- pedit_skb_hdr_offset(skb, htype, &hoffset);
+ rc = pedit_skb_hdr_offset(skb, htype, &hoffset);
+ if (rc) {
+ pr_info_ratelimited("tc action pedit unable to extract header offset for header type (0x%x)\n", htype);
+ goto bad;
+ }
if (tkey->offmask) {
u8 *d, _d;
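Note: the rewrite above stops trusting skb_transport_offset(), which may be unset or stale for packets never parsed past L3, and instead derives the L4 offset from the network header itself (IHL for IPv4, ipv6_find_hdr() for IPv6), failing the action when neither works. A self-contained userspace sketch of the IPv4 arithmetic, with hypothetical names:

    #include <stdint.h>

    /* L4 offset of a raw IPv4 packet; noff is the network-header offset */
    static long l4_offset_ipv4(const uint8_t *pkt, long noff)
    {
            uint8_t ihl = pkt[noff] & 0x0f;   /* header length in 32-bit words */

            if (ihl < 5)                      /* minimum legal header is 20 bytes */
                    return -1;
            return noff + ihl * 4;            /* IP options included */
    }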
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 227cba58ce9f..2e9dce03d1ec 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -357,23 +357,23 @@ static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a,
opt.burst = PSCHED_NS2TICKS(p->tcfp_burst);
if (p->rate_present) {
psched_ratecfg_getrate(&opt.rate, &p->rate);
- if ((police->params->rate.rate_bytes_ps >= (1ULL << 32)) &&
+ if ((p->rate.rate_bytes_ps >= (1ULL << 32)) &&
nla_put_u64_64bit(skb, TCA_POLICE_RATE64,
- police->params->rate.rate_bytes_ps,
+ p->rate.rate_bytes_ps,
TCA_POLICE_PAD))
goto nla_put_failure;
}
if (p->peak_present) {
psched_ratecfg_getrate(&opt.peakrate, &p->peak);
- if ((police->params->peak.rate_bytes_ps >= (1ULL << 32)) &&
+ if ((p->peak.rate_bytes_ps >= (1ULL << 32)) &&
nla_put_u64_64bit(skb, TCA_POLICE_PEAKRATE64,
- police->params->peak.rate_bytes_ps,
+ p->peak.rate_bytes_ps,
TCA_POLICE_PAD))
goto nla_put_failure;
}
if (p->pps_present) {
if (nla_put_u64_64bit(skb, TCA_POLICE_PKTRATE64,
- police->params->ppsrate.rate_pkts_ps,
+ p->ppsrate.rate_pkts_ps,
TCA_POLICE_PAD))
goto nla_put_failure;
if (nla_put_u64_64bit(skb, TCA_POLICE_PKTBURST64,
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 2621550bfddc..a193cc7b3241 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -43,8 +43,6 @@
#include <net/flow_offload.h>
#include <net/tc_wrapper.h>
-extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
-
/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);
@@ -659,8 +657,8 @@ static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
{
struct tcf_block *block = chain->block;
const struct tcf_proto_ops *tmplt_ops;
+ unsigned int refcnt, non_act_refcnt;
bool free_block = false;
- unsigned int refcnt;
void *tmplt_priv;
mutex_lock(&block->lock);
@@ -680,13 +678,15 @@ static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
* save these to temporary variables.
*/
refcnt = --chain->refcnt;
+ non_act_refcnt = refcnt - chain->action_refcnt;
tmplt_ops = chain->tmplt_ops;
tmplt_priv = chain->tmplt_priv;
- /* The last dropped non-action reference will trigger notification. */
- if (refcnt - chain->action_refcnt == 0 && !by_act) {
- tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
- block, NULL, 0, 0, false);
+ if (non_act_refcnt == chain->explicitly_created && !by_act) {
+ if (non_act_refcnt == 0)
+ tc_chain_notify_delete(tmplt_ops, tmplt_priv,
+ chain->index, block, NULL, 0, 0,
+ false);
/* Last reference to chain, no need to lock. */
chain->flushing = false;
}
@@ -2952,6 +2952,7 @@ static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
return PTR_ERR(ops);
if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
+ module_put(ops->owner);
return -EOPNOTSUPP;
}
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 9dbc43388e57..815c3e416bc5 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -1153,6 +1153,9 @@ static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
if (option_len > sizeof(struct geneve_opt))
data_len = option_len - sizeof(struct geneve_opt);
+ if (key->enc_opts.len > FLOW_DIS_TUN_OPTS_MAX - 4)
+ return -ERANGE;
+
opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
memset(opt, 0xff, option_len);
opt->length = data_len / 4;
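Note: the new -ERANGE check guards the write at enc_opts.data[enc_opts.len]; without it, a crafted sequence of options could advance enc_opts.len to the end of the destination array and the next option header would land out of bounds. The constant 4 is sizeof(struct geneve_opt), the fixed header that precedes the variable-length data. A sketch of the invariant, assuming the 255-byte FLOW_DIS_TUN_OPTS_MAX from the kernel headers:

    #define FLOW_DIS_TUN_OPTS_MAX 255    /* assumed, from flow_dissector.h */
    #define GENEVE_OPT_HDR_LEN    4      /* fixed part of struct geneve_opt */

    /* writing another option header at offset 'used' stays in bounds only if: */
    static int room_for_opt_hdr(int used)
    {
            return used <= FLOW_DIS_TUN_OPTS_MAX - GENEVE_OPT_HDR_LEN;
    }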
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 4e2e269f121f..d15d50de7980 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -718,13 +718,19 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp,
struct nlattr *est, u32 flags, u32 fl_flags,
struct netlink_ext_ack *extack)
{
- int err;
+ int err, ifindex = -1;
err = tcf_exts_validate_ex(net, tp, tb, est, &n->exts, flags,
fl_flags, extack);
if (err < 0)
return err;
+ if (tb[TCA_U32_INDEV]) {
+ ifindex = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
+ if (ifindex < 0)
+ return -EINVAL;
+ }
+
if (tb[TCA_U32_LINK]) {
u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
struct tc_u_hnode *ht_down = NULL, *ht_old;
@@ -759,13 +765,9 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp,
tcf_bind_filter(tp, &n->res, base);
}
- if (tb[TCA_U32_INDEV]) {
- int ret;
- ret = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
- if (ret < 0)
- return -EINVAL;
- n->ifindex = ret;
- }
+ if (ifindex >= 0)
+ n->ifindex = ifindex;
+
return 0;
}
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index fdb8f429333d..aa6b1fe65151 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -309,7 +309,7 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
if (dev_ingress_queue(dev))
q = qdisc_match_from_root(
- dev_ingress_queue(dev)->qdisc_sleeping,
+ rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping),
handle);
out:
return q;
@@ -328,7 +328,8 @@ struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
nq = dev_ingress_queue_rcu(dev);
if (nq)
- q = qdisc_match_from_root(nq->qdisc_sleeping, handle);
+ q = qdisc_match_from_root(rcu_dereference(nq->qdisc_sleeping),
+ handle);
out:
return q;
}
@@ -634,8 +635,13 @@ EXPORT_SYMBOL(qdisc_watchdog_init);
void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
u64 delta_ns)
{
- if (test_bit(__QDISC_STATE_DEACTIVATED,
- &qdisc_root_sleeping(wd->qdisc)->state))
+ bool deactivated;
+
+ rcu_read_lock();
+ deactivated = test_bit(__QDISC_STATE_DEACTIVATED,
+ &qdisc_root_sleeping(wd->qdisc)->state);
+ rcu_read_unlock();
+ if (deactivated)
return;
if (hrtimer_is_queued(&wd->timer)) {
@@ -1073,17 +1079,29 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
if (parent == NULL) {
unsigned int i, num_q, ingress;
+ struct netdev_queue *dev_queue;
ingress = 0;
num_q = dev->num_tx_queues;
if ((q && q->flags & TCQ_F_INGRESS) ||
(new && new->flags & TCQ_F_INGRESS)) {
- num_q = 1;
ingress = 1;
- if (!dev_ingress_queue(dev)) {
+ dev_queue = dev_ingress_queue(dev);
+ if (!dev_queue) {
NL_SET_ERR_MSG(extack, "Device does not have an ingress queue");
return -ENOENT;
}
+
+ q = rtnl_dereference(dev_queue->qdisc_sleeping);
+
+ /* This is the counterpart of that qdisc_refcount_inc_nz() call in
+ * __tcf_qdisc_find() for filter requests.
+ */
+ if (!qdisc_refcount_dec_if_one(q)) {
+ NL_SET_ERR_MSG(extack,
+ "Current ingress or clsact Qdisc has ongoing filter requests");
+ return -EBUSY;
+ }
}
if (dev->flags & IFF_UP)
@@ -1094,18 +1112,26 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
if (new && new->ops->attach && !ingress)
goto skip;
- for (i = 0; i < num_q; i++) {
- struct netdev_queue *dev_queue = dev_ingress_queue(dev);
-
- if (!ingress)
+ if (!ingress) {
+ for (i = 0; i < num_q; i++) {
dev_queue = netdev_get_tx_queue(dev, i);
+ old = dev_graft_qdisc(dev_queue, new);
- old = dev_graft_qdisc(dev_queue, new);
- if (new && i > 0)
- qdisc_refcount_inc(new);
-
- if (!ingress)
+ if (new && i > 0)
+ qdisc_refcount_inc(new);
qdisc_put(old);
+ }
+ } else {
+ old = dev_graft_qdisc(dev_queue, NULL);
+
+ /* {ingress,clsact}_destroy() @old before grafting @new to avoid
+ * unprotected concurrent accesses to net_device::miniq_{in,e}gress
+ * pointer(s) in mini_qdisc_pair_swap().
+ */
+ qdisc_notify(net, skb, n, classid, old, new, extack);
+ qdisc_destroy(old);
+
+ dev_graft_qdisc(dev_queue, new);
}
skip:
@@ -1119,8 +1145,6 @@ skip:
if (new && new->ops->attach)
new->ops->attach(new);
- } else {
- notify_and_destroy(net, skb, n, classid, old, new, extack);
}
if (dev->flags & IFF_UP)
@@ -1252,7 +1276,12 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
sch->parent = parent;
if (handle == TC_H_INGRESS) {
- sch->flags |= TCQ_F_INGRESS;
+ if (!(sch->flags & TCQ_F_INGRESS)) {
+ NL_SET_ERR_MSG(extack,
+ "Specified parent ID is reserved for ingress and clsact Qdiscs");
+ err = -EINVAL;
+ goto err_out3;
+ }
handle = TC_H_MAKE(TC_H_INGRESS, 0);
} else {
if (handle == 0) {
@@ -1473,7 +1502,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
}
q = qdisc_leaf(p, clid);
} else if (dev_ingress_queue(dev)) {
- q = dev_ingress_queue(dev)->qdisc_sleeping;
+ q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
}
} else {
q = rtnl_dereference(dev->qdisc);
@@ -1559,7 +1588,7 @@ replay:
}
q = qdisc_leaf(p, clid);
} else if (dev_ingress_queue_create(dev)) {
- q = dev_ingress_queue(dev)->qdisc_sleeping;
+ q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
}
} else {
q = rtnl_dereference(dev->qdisc);
@@ -1591,11 +1620,20 @@ replay:
NL_SET_ERR_MSG(extack, "Invalid qdisc name");
return -EINVAL;
}
+ if (q->flags & TCQ_F_INGRESS) {
+ NL_SET_ERR_MSG(extack,
+ "Cannot regraft ingress or clsact Qdiscs");
+ return -EINVAL;
+ }
if (q == p ||
(p && check_loop(q, p, 0))) {
NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected");
return -ELOOP;
}
+ if (clid == TC_H_INGRESS) {
+ NL_SET_ERR_MSG(extack, "Ingress cannot graft directly");
+ return -EINVAL;
+ }
qdisc_refcount_inc(q);
goto graft;
} else {
@@ -1791,8 +1829,8 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
dev_queue = dev_ingress_queue(dev);
if (dev_queue &&
- tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
- &q_idx, s_q_idx, false,
+ tc_dump_qdisc_root(rtnl_dereference(dev_queue->qdisc_sleeping),
+ skb, cb, &q_idx, s_q_idx, false,
tca[TCA_DUMP_INVISIBLE]) < 0)
goto done;
@@ -2235,8 +2273,8 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
dev_queue = dev_ingress_queue(dev);
if (dev_queue &&
- tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
- &t, s_t, false) < 0)
+ tc_dump_tclass_root(rtnl_dereference(dev_queue->qdisc_sleeping),
+ skb, tcm, cb, &t, s_t, false) < 0)
goto done;
done:
@@ -2288,7 +2326,9 @@ static struct pernet_operations psched_net_ops = {
.exit = psched_net_exit,
};
+#if IS_ENABLED(CONFIG_RETPOLINE)
DEFINE_STATIC_KEY_FALSE(tc_skip_wrapper);
+#endif
static int __init pktsched_init(void)
{
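Note: qdisc_refcount_dec_if_one() in the ingress branch above is the counterpart of qdisc_refcount_inc_nz() in __tcf_qdisc_find(), as the in-diff comment says: grafting only proceeds when the refcount can drop from exactly one, i.e. no filter request still holds a reference. A sketch of the helper (assumed to live in include/net/sch_generic.h):

    static inline bool qdisc_refcount_dec_if_one(struct Qdisc *qdisc)
    {
            return refcount_dec_if_one(&qdisc->refcnt);
    }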
diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
index 6980796d435d..591d87d5e5c0 100644
--- a/net/sched/sch_fq_pie.c
+++ b/net/sched/sch_fq_pie.c
@@ -201,6 +201,11 @@ out:
return NET_XMIT_CN;
}
+static struct netlink_range_validation fq_pie_q_range = {
+ .min = 1,
+ .max = 1 << 20,
+};
+
static const struct nla_policy fq_pie_policy[TCA_FQ_PIE_MAX + 1] = {
[TCA_FQ_PIE_LIMIT] = {.type = NLA_U32},
[TCA_FQ_PIE_FLOWS] = {.type = NLA_U32},
@@ -208,7 +213,8 @@ static const struct nla_policy fq_pie_policy[TCA_FQ_PIE_MAX + 1] = {
[TCA_FQ_PIE_TUPDATE] = {.type = NLA_U32},
[TCA_FQ_PIE_ALPHA] = {.type = NLA_U32},
[TCA_FQ_PIE_BETA] = {.type = NLA_U32},
- [TCA_FQ_PIE_QUANTUM] = {.type = NLA_U32},
+ [TCA_FQ_PIE_QUANTUM] =
+ NLA_POLICY_FULL_RANGE(NLA_U32, &fq_pie_q_range),
[TCA_FQ_PIE_MEMORY_LIMIT] = {.type = NLA_U32},
[TCA_FQ_PIE_ECN_PROB] = {.type = NLA_U32},
[TCA_FQ_PIE_ECN] = {.type = NLA_U32},
@@ -373,6 +379,7 @@ static void fq_pie_timer(struct timer_list *t)
spinlock_t *root_lock; /* to lock qdisc for probability calculations */
u32 idx;
+ rcu_read_lock();
root_lock = qdisc_lock(qdisc_root_sleeping(sch));
spin_lock(root_lock);
@@ -385,6 +392,7 @@ static void fq_pie_timer(struct timer_list *t)
mod_timer(&q->adapt_timer, jiffies + q->p_params.tupdate);
spin_unlock(root_lock);
+ rcu_read_unlock();
}
static int fq_pie_init(struct Qdisc *sch, struct nlattr *opt,
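Note: moving the quantum bounds into the policy table lets the netlink core reject TCA_FQ_PIE_QUANTUM values outside [1, 1 << 20] with an extack before fq_pie_change() ever runs (a zero quantum previously slipped through and broke the scheduling math). The same pattern in isolation, with hypothetical MY_* names; NLA_POLICY_FULL_RANGE itself comes from <net/netlink.h>:

    static struct netlink_range_validation my_quantum_range = {
            .min = 1,
            .max = 1 << 20,
    };

    static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
            [MY_ATTR_QUANTUM] = NLA_POLICY_FULL_RANGE(NLA_U32, &my_quantum_range),
    };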
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 37e41f972f69..5d7e23f4cc0e 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -648,7 +648,7 @@ struct Qdisc_ops noop_qdisc_ops __read_mostly = {
static struct netdev_queue noop_netdev_queue = {
RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc),
- .qdisc_sleeping = &noop_qdisc,
+ RCU_POINTER_INITIALIZER(qdisc_sleeping, &noop_qdisc),
};
struct Qdisc noop_qdisc = {
@@ -1046,7 +1046,7 @@ static void qdisc_free_cb(struct rcu_head *head)
qdisc_free(q);
}
-static void qdisc_destroy(struct Qdisc *qdisc)
+static void __qdisc_destroy(struct Qdisc *qdisc)
{
const struct Qdisc_ops *ops = qdisc->ops;
@@ -1070,6 +1070,14 @@ static void qdisc_destroy(struct Qdisc *qdisc)
call_rcu(&qdisc->rcu, qdisc_free_cb);
}
+void qdisc_destroy(struct Qdisc *qdisc)
+{
+ if (qdisc->flags & TCQ_F_BUILTIN)
+ return;
+
+ __qdisc_destroy(qdisc);
+}
+
void qdisc_put(struct Qdisc *qdisc)
{
if (!qdisc)
@@ -1079,7 +1087,7 @@ void qdisc_put(struct Qdisc *qdisc)
!refcount_dec_and_test(&qdisc->refcnt))
return;
- qdisc_destroy(qdisc);
+ __qdisc_destroy(qdisc);
}
EXPORT_SYMBOL(qdisc_put);
@@ -1094,7 +1102,7 @@ void qdisc_put_unlocked(struct Qdisc *qdisc)
!refcount_dec_and_rtnl_lock(&qdisc->refcnt))
return;
- qdisc_destroy(qdisc);
+ __qdisc_destroy(qdisc);
rtnl_unlock();
}
EXPORT_SYMBOL(qdisc_put_unlocked);
@@ -1103,7 +1111,7 @@ EXPORT_SYMBOL(qdisc_put_unlocked);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
struct Qdisc *qdisc)
{
- struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
+ struct Qdisc *oqdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
spinlock_t *root_lock;
root_lock = qdisc_lock(oqdisc);
@@ -1112,7 +1120,7 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
/* ... and graft new one */
if (qdisc == NULL)
qdisc = &noop_qdisc;
- dev_queue->qdisc_sleeping = qdisc;
+ rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
spin_unlock_bh(root_lock);
@@ -1125,12 +1133,12 @@ static void shutdown_scheduler_queue(struct net_device *dev,
struct netdev_queue *dev_queue,
void *_qdisc_default)
{
- struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+ struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
struct Qdisc *qdisc_default = _qdisc_default;
if (qdisc) {
rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
- dev_queue->qdisc_sleeping = qdisc_default;
+ rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc_default);
qdisc_put(qdisc);
}
@@ -1154,7 +1162,7 @@ static void attach_one_default_qdisc(struct net_device *dev,
if (!netif_is_multiqueue(dev))
qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
- dev_queue->qdisc_sleeping = qdisc;
+ rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
}
static void attach_default_qdiscs(struct net_device *dev)
@@ -1167,7 +1175,7 @@ static void attach_default_qdiscs(struct net_device *dev)
if (!netif_is_multiqueue(dev) ||
dev->priv_flags & IFF_NO_QUEUE) {
netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
- qdisc = txq->qdisc_sleeping;
+ qdisc = rtnl_dereference(txq->qdisc_sleeping);
rcu_assign_pointer(dev->qdisc, qdisc);
qdisc_refcount_inc(qdisc);
} else {
@@ -1186,7 +1194,7 @@ static void attach_default_qdiscs(struct net_device *dev)
netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
dev->priv_flags |= IFF_NO_QUEUE;
netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
- qdisc = txq->qdisc_sleeping;
+ qdisc = rtnl_dereference(txq->qdisc_sleeping);
rcu_assign_pointer(dev->qdisc, qdisc);
qdisc_refcount_inc(qdisc);
dev->priv_flags ^= IFF_NO_QUEUE;
@@ -1202,7 +1210,7 @@ static void transition_one_qdisc(struct net_device *dev,
struct netdev_queue *dev_queue,
void *_need_watchdog)
{
- struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
+ struct Qdisc *new_qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
int *need_watchdog_p = _need_watchdog;
if (!(new_qdisc->flags & TCQ_F_BUILTIN))
@@ -1272,7 +1280,7 @@ static void dev_reset_queue(struct net_device *dev,
struct Qdisc *qdisc;
bool nolock;
- qdisc = dev_queue->qdisc_sleeping;
+ qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
if (!qdisc)
return;
@@ -1303,7 +1311,7 @@ static bool some_qdisc_is_busy(struct net_device *dev)
int val;
dev_queue = netdev_get_tx_queue(dev, i);
- q = dev_queue->qdisc_sleeping;
+ q = rtnl_dereference(dev_queue->qdisc_sleeping);
root_lock = qdisc_lock(q);
spin_lock_bh(root_lock);
@@ -1379,7 +1387,7 @@ EXPORT_SYMBOL(dev_deactivate);
static int qdisc_change_tx_queue_len(struct net_device *dev,
struct netdev_queue *dev_queue)
{
- struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+ struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
const struct Qdisc_ops *ops = qdisc->ops;
if (ops->change_tx_queue_len)
@@ -1404,7 +1412,7 @@ void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx)
unsigned int i;
for (i = new_real_tx; i < dev->real_num_tx_queues; i++) {
- qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping;
+ qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping);
/* Only update the default qdiscs we created,
* qdiscs with handles are always hashed.
*/
@@ -1412,7 +1420,7 @@ void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx)
qdisc_hash_del(qdisc);
}
for (i = dev->real_num_tx_queues; i < new_real_tx; i++) {
- qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping;
+ qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping);
if (qdisc != &noop_qdisc && !qdisc->handle)
qdisc_hash_add(qdisc, false);
}
@@ -1449,7 +1457,7 @@ static void dev_init_scheduler_queue(struct net_device *dev,
struct Qdisc *qdisc = _qdisc;
rcu_assign_pointer(dev_queue->qdisc, qdisc);
- dev_queue->qdisc_sleeping = qdisc;
+ rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
}
void dev_init_scheduler(struct net_device *dev)
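Note: with qdisc_sleeping now an RCU-annotated pointer, every access in this file (and in sch_mq, sch_mqprio, sch_taprio, and sch_teql below) goes through the accessor matching its locking context. A sketch of the pattern, with txq standing for any struct netdev_queue:

    rcu_assign_pointer(txq->qdisc_sleeping, qdisc);  /* writer, RTNL held */
    q = rtnl_dereference(txq->qdisc_sleeping);       /* reader under RTNL */
    q = rcu_dereference(txq->qdisc_sleeping);        /* reader inside rcu_read_lock() */
    rcu_access_pointer(txq->qdisc_sleeping);         /* pointer comparison only,
                                                        no dereference (see sch_teql) */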
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index 84838128b9c5..e43a45499372 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -80,6 +80,9 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
struct net_device *dev = qdisc_dev(sch);
int err;
+ if (sch->parent != TC_H_INGRESS)
+ return -EOPNOTSUPP;
+
net_inc_ingress_queue();
mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress);
@@ -101,6 +104,9 @@ static void ingress_destroy(struct Qdisc *sch)
{
struct ingress_sched_data *q = qdisc_priv(sch);
+ if (sch->parent != TC_H_INGRESS)
+ return;
+
tcf_block_put_ext(q->block, sch, &q->block_info);
net_dec_ingress_queue();
}
@@ -134,7 +140,7 @@ static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
.cl_ops = &ingress_class_ops,
.id = "ingress",
.priv_size = sizeof(struct ingress_sched_data),
- .static_flags = TCQ_F_CPUSTATS,
+ .static_flags = TCQ_F_INGRESS | TCQ_F_CPUSTATS,
.init = ingress_init,
.destroy = ingress_destroy,
.dump = ingress_dump,
@@ -219,6 +225,9 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
struct net_device *dev = qdisc_dev(sch);
int err;
+ if (sch->parent != TC_H_CLSACT)
+ return -EOPNOTSUPP;
+
net_inc_ingress_queue();
net_inc_egress_queue();
@@ -248,6 +257,9 @@ static void clsact_destroy(struct Qdisc *sch)
{
struct clsact_sched_data *q = qdisc_priv(sch);
+ if (sch->parent != TC_H_CLSACT)
+ return;
+
tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info);
tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info);
@@ -269,7 +281,7 @@ static struct Qdisc_ops clsact_qdisc_ops __read_mostly = {
.cl_ops = &clsact_class_ops,
.id = "clsact",
.priv_size = sizeof(struct clsact_sched_data),
- .static_flags = TCQ_F_CPUSTATS,
+ .static_flags = TCQ_F_INGRESS | TCQ_F_CPUSTATS,
.init = clsact_init,
.destroy = clsact_destroy,
.dump = ingress_dump,
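Note: both init/destroy guards above compare sch->parent against what is effectively the same value, since the UAPI aliases the two handles; the point is to refuse creating (or double-destroying) these qdiscs under an arbitrary parent, which previously let crafted requests corrupt the miniq pointers used by mini_qdisc_pair_swap(). For reference, from include/uapi/linux/pkt_sched.h:

    #define TC_H_INGRESS    (0xFFFFFFF1U)
    #define TC_H_CLSACT     TC_H_INGRESS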
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index d0bc660d7401..c860119a8f09 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -141,7 +141,7 @@ static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
* qdisc totals are added at end.
*/
for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
- qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
+ qdisc = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping);
spin_lock_bh(qdisc_lock(qdisc));
gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
@@ -202,7 +202,7 @@ static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl)
{
struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
- return dev_queue->qdisc_sleeping;
+ return rtnl_dereference(dev_queue->qdisc_sleeping);
}
static unsigned long mq_find(struct Qdisc *sch, u32 classid)
@@ -221,7 +221,7 @@ static int mq_dump_class(struct Qdisc *sch, unsigned long cl,
tcm->tcm_parent = TC_H_ROOT;
tcm->tcm_handle |= TC_H_MIN(cl);
- tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
+ tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle;
return 0;
}
@@ -230,7 +230,7 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
{
struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
- sch = dev_queue->qdisc_sleeping;
+ sch = rtnl_dereference(dev_queue->qdisc_sleeping);
if (gnet_stats_copy_basic(d, sch->cpu_bstats, &sch->bstats, true) < 0 ||
qdisc_qstats_copy(d, sch) < 0)
return -1;
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index dc5a0ff50b14..ab69ff7577fc 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -557,7 +557,7 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
* qdisc totals are added at end.
*/
for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
- qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
+ qdisc = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping);
spin_lock_bh(qdisc_lock(qdisc));
gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
@@ -604,7 +604,7 @@ static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
if (!dev_queue)
return NULL;
- return dev_queue->qdisc_sleeping;
+ return rtnl_dereference(dev_queue->qdisc_sleeping);
}
static unsigned long mqprio_find(struct Qdisc *sch, u32 classid)
@@ -637,7 +637,7 @@ static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
tcm->tcm_parent = (tc < 0) ? 0 :
TC_H_MAKE(TC_H_MAJ(sch->handle),
TC_H_MIN(tc + TC_H_MIN_PRIORITY));
- tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
+ tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle;
} else {
tcm->tcm_parent = TC_H_ROOT;
tcm->tcm_info = 0;
@@ -693,7 +693,7 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
} else {
struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
- sch = dev_queue->qdisc_sleeping;
+ sch = rtnl_dereference(dev_queue->qdisc_sleeping);
if (gnet_stats_copy_basic(d, sch->cpu_bstats,
&sch->bstats, true) < 0 ||
qdisc_qstats_copy(d, sch) < 0)
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index 2152a56d73f8..2da6250ec346 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -421,8 +421,10 @@ static void pie_timer(struct timer_list *t)
{
struct pie_sched_data *q = from_timer(q, t, adapt_timer);
struct Qdisc *sch = q->sch;
- spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+ spinlock_t *root_lock;
+ rcu_read_lock();
+ root_lock = qdisc_lock(qdisc_root_sleeping(sch));
spin_lock(root_lock);
pie_calculate_probability(&q->params, &q->vars, sch->qstats.backlog);
@@ -430,6 +432,7 @@ static void pie_timer(struct timer_list *t)
if (q->params.tupdate)
mod_timer(&q->adapt_timer, jiffies + q->params.tupdate);
spin_unlock(root_lock);
+ rcu_read_unlock();
}
static int pie_init(struct Qdisc *sch, struct nlattr *opt,
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 98129324e157..16277b6a0238 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -321,12 +321,15 @@ static inline void red_adaptative_timer(struct timer_list *t)
{
struct red_sched_data *q = from_timer(q, t, adapt_timer);
struct Qdisc *sch = q->sch;
- spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+ spinlock_t *root_lock;
+ rcu_read_lock();
+ root_lock = qdisc_lock(qdisc_root_sleeping(sch));
spin_lock(root_lock);
red_adaptative_algo(&q->parms, &q->vars);
mod_timer(&q->adapt_timer, jiffies + HZ/2);
spin_unlock(root_lock);
+ rcu_read_unlock();
}
static int red_init(struct Qdisc *sch, struct nlattr *opt,
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index abd436307d6a..66dcb18638fe 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -606,10 +606,12 @@ static void sfq_perturbation(struct timer_list *t)
{
struct sfq_sched_data *q = from_timer(q, t, perturb_timer);
struct Qdisc *sch = q->sch;
- spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+ spinlock_t *root_lock;
siphash_key_t nkey;
get_random_bytes(&nkey, sizeof(nkey));
+ rcu_read_lock();
+ root_lock = qdisc_lock(qdisc_root_sleeping(sch));
spin_lock(root_lock);
q->perturbation = nkey;
if (!q->filter_list && q->tail)
@@ -618,6 +620,7 @@ static void sfq_perturbation(struct timer_list *t)
if (q->perturb_period)
mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
+ rcu_read_unlock();
}
static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 76db9a10ef50..cf0e61ed9225 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -797,6 +797,9 @@ static struct sk_buff *taprio_dequeue_tc_priority(struct Qdisc *sch,
taprio_next_tc_txq(dev, tc, &q->cur_txq[tc]);
+ if (q->cur_txq[tc] >= dev->num_tx_queues)
+ q->cur_txq[tc] = first_txq;
+
if (skb)
return skb;
} while (q->cur_txq[tc] != first_txq);
@@ -2358,7 +2361,7 @@ static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
if (!dev_queue)
return NULL;
- return dev_queue->qdisc_sleeping;
+ return rtnl_dereference(dev_queue->qdisc_sleeping);
}
static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
@@ -2377,7 +2380,7 @@ static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
tcm->tcm_parent = TC_H_ROOT;
tcm->tcm_handle |= TC_H_MIN(cl);
- tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
+ tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle;
return 0;
}
@@ -2389,7 +2392,7 @@ static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
{
struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
- sch = dev_queue->qdisc_sleeping;
+ sch = rtnl_dereference(dev_queue->qdisc_sleeping);
if (gnet_stats_copy_basic(d, NULL, &sch->bstats, true) < 0 ||
qdisc_qstats_copy(d, sch) < 0)
return -1;
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 16f9238aa51d..7721239c185f 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -297,7 +297,7 @@ restart:
struct net_device *slave = qdisc_dev(q);
struct netdev_queue *slave_txq = netdev_get_tx_queue(slave, 0);
- if (slave_txq->qdisc_sleeping != q)
+ if (rcu_access_pointer(slave_txq->qdisc_sleeping) != q)
continue;
if (netif_xmit_stopped(netdev_get_tx_queue(slave, subq)) ||
!netif_running(slave)) {