Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_dev.c | 4
-rw-r--r--  net/batman-adv/netlink.c | 6
-rw-r--r--  net/bridge/br.c | 5
-rw-r--r--  net/bridge/br_fdb.c | 46
-rw-r--r--  net/bridge/br_input.c | 21
-rw-r--r--  net/bridge/br_mdb.c | 11
-rw-r--r--  net/bridge/br_multicast.c | 12
-rw-r--r--  net/bridge/br_netlink.c | 21
-rw-r--r--  net/bridge/br_private.h | 5
-rw-r--r--  net/bridge/br_switchdev.c | 6
-rw-r--r--  net/bridge/br_vlan.c | 4
-rw-r--r--  net/can/j1939/transport.c | 2
-rw-r--r--  net/core/bpf_sk_storage.c | 35
-rw-r--r--  net/core/dev.c | 109
-rw-r--r--  net/core/dev.h | 7
-rw-r--r--  net/core/dev_ioctl.c | 2
-rw-r--r--  net/core/devlink.c | 219
-rw-r--r--  net/core/drop_monitor.c | 12
-rw-r--r--  net/core/failover.c | 6
-rw-r--r--  net/core/gen_stats.c | 16
-rw-r--r--  net/core/gro.c | 70
-rw-r--r--  net/core/net-sysfs.c | 4
-rw-r--r--  net/core/net_namespace.c | 5
-rw-r--r--  net/core/rtnetlink.c | 90
-rw-r--r--  net/core/skbuff.c | 90
-rw-r--r--  net/core/sock.c | 20
-rw-r--r--  net/core/sock_reuseport.c | 94
-rw-r--r--  net/core/utils.c | 4
-rw-r--r--  net/dcb/dcbnl.c | 134
-rw-r--r--  net/dccp/dccp.h | 1
-rw-r--r--  net/dccp/ipv6.c | 15
-rw-r--r--  net/dccp/proto.c | 8
-rw-r--r--  net/dsa/dsa2.c | 9
-rw-r--r--  net/dsa/slave.c | 13
-rw-r--r--  net/ethtool/channels.c | 19
-rw-r--r--  net/ethtool/common.c | 80
-rw-r--r--  net/ethtool/common.h | 1
-rw-r--r--  net/ethtool/ioctl.c | 41
-rw-r--r--  net/ethtool/linkstate.c | 24
-rw-r--r--  net/ieee802154/core.c | 3
-rw-r--r--  net/ieee802154/nl802154.c | 6
-rw-r--r--  net/ipv4/Makefile | 2
-rw-r--r--  net/ipv4/af_inet.c | 4
-rw-r--r--  net/ipv4/inet_fragment.c | 14
-rw-r--r--  net/ipv4/ip_fragment.c | 19
-rw-r--r--  net/ipv4/ip_gre.c | 2
-rw-r--r--  net/ipv4/ip_sockglue.c | 3
-rw-r--r--  net/ipv4/metrics.c | 3
-rw-r--r--  net/ipv4/proc.c | 1
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 43
-rw-r--r--  net/ipv4/tcp.c | 5
-rw-r--r--  net/ipv4/tcp_dctcp.c | 23
-rw-r--r--  net/ipv4/tcp_input.c | 51
-rw-r--r--  net/ipv4/tcp_ipv4.c | 8
-rw-r--r--  net/ipv4/tcp_plb.c | 109
-rw-r--r--  net/ipv4/udp.c | 21
-rw-r--r--  net/ipv6/af_inet6.c | 9
-rw-r--r--  net/ipv6/datagram.c | 3
-rw-r--r--  net/ipv6/ip6_gre.c | 20
-rw-r--r--  net/ipv6/ipv6_sockglue.c | 6
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c | 2
-rw-r--r--  net/ipv6/ping.c | 6
-rw-r--r--  net/ipv6/raw.c | 2
-rw-r--r--  net/ipv6/reassembly.c | 13
-rw-r--r--  net/ipv6/seg6_local.c | 4
-rw-r--r--  net/ipv6/tcp_ipv6.c | 8
-rw-r--r--  net/ipv6/udp.c | 10
-rw-r--r--  net/l2tp/l2tp_ip6.c | 2
-rw-r--r--  net/mac80211/agg-rx.c | 25
-rw-r--r--  net/mac80211/agg-tx.c | 2
-rw-r--r--  net/mac80211/cfg.c | 43
-rw-r--r--  net/mac80211/debugfs.c | 4
-rw-r--r--  net/mac80211/debugfs_netdev.c | 3
-rw-r--r--  net/mac80211/debugfs_sta.c | 148
-rw-r--r--  net/mac80211/debugfs_sta.h | 12
-rw-r--r--  net/mac80211/driver-ops.c | 27
-rw-r--r--  net/mac80211/driver-ops.h | 16
-rw-r--r--  net/mac80211/ieee80211_i.h | 22
-rw-r--r--  net/mac80211/iface.c | 69
-rw-r--r--  net/mac80211/link.c | 17
-rw-r--r--  net/mac80211/main.c | 23
-rw-r--r--  net/mac80211/mlme.c | 131
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.c | 3
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.h | 1
-rw-r--r--  net/mac80211/rx.c | 3
-rw-r--r--  net/mac80211/sta_info.c | 118
-rw-r--r--  net/mac80211/sta_info.h | 7
-rw-r--r--  net/mac80211/tdls.c | 1
-rw-r--r--  net/mac80211/tx.c | 30
-rw-r--r--  net/mac80211/util.c | 246
-rw-r--r--  net/mac80211/wme.c | 63
-rw-r--r--  net/mac80211/wme.h | 4
-rw-r--r--  net/mac802154/cfg.c | 6
-rw-r--r--  net/mac802154/driver-ops.h | 253
-rw-r--r--  net/mac802154/ieee802154_i.h | 56
-rw-r--r--  net/mac802154/iface.c | 44
-rw-r--r--  net/mac802154/main.c | 2
-rw-r--r--  net/mac802154/rx.c | 29
-rw-r--r--  net/mac802154/tx.c | 132
-rw-r--r--  net/mac802154/util.c | 71
-rw-r--r--  net/mpls/af_mpls.c | 4
-rw-r--r--  net/mptcp/protocol.c | 11
-rw-r--r--  net/mptcp/sockopt.c | 34
-rw-r--r--  net/mptcp/subflow.c | 2
-rw-r--r--  net/netfilter/Kconfig | 6
-rw-r--r--  net/netfilter/Makefile | 4
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 4
-rw-r--r--  net/netfilter/nf_conntrack_helper.c | 100
-rw-r--r--  net/netfilter/nf_tables_api.c | 41
-rw-r--r--  net/netfilter/nf_tables_core.c | 2
-rw-r--r--  net/netfilter/nft_inner.c | 384
-rw-r--r--  net/netfilter/nft_meta.c | 62
-rw-r--r--  net/netfilter/nft_objref.c | 22
-rw-r--r--  net/netfilter/nft_payload.c | 134
-rw-r--r--  net/netlink/af_netlink.c | 42
-rw-r--r--  net/netlink/genetlink.c | 495
-rw-r--r--  net/nfc/nci/core.c | 8
-rw-r--r--  net/nfc/nci/hci.c | 4
-rw-r--r--  net/nfc/rawsock.c | 3
-rw-r--r--  net/openvswitch/conntrack.c | 105
-rw-r--r--  net/openvswitch/datapath.c | 4
-rw-r--r--  net/openvswitch/flow_netlink.c | 2
-rw-r--r--  net/openvswitch/flow_table.c | 9
-rw-r--r--  net/openvswitch/vport-geneve.c | 2
-rw-r--r--  net/openvswitch/vport-gre.c | 2
-rw-r--r--  net/openvswitch/vport-netdev.c | 2
-rw-r--r--  net/openvswitch/vport-vxlan.c | 2
-rw-r--r--  net/packet/af_packet.c | 11
-rw-r--r--  net/rds/message.c | 2
-rw-r--r--  net/rds/send.c | 3
-rw-r--r--  net/rds/tcp.c | 3
-rw-r--r--  net/rxrpc/Makefile | 1
-rw-r--r--  net/rxrpc/af_rxrpc.c | 5
-rw-r--r--  net/rxrpc/ar-internal.h | 224
-rw-r--r--  net/rxrpc/call_accept.c | 8
-rw-r--r--  net/rxrpc/call_event.c | 427
-rw-r--r--  net/rxrpc/call_object.c | 63
-rw-r--r--  net/rxrpc/conn_client.c | 3
-rw-r--r--  net/rxrpc/conn_object.c | 4
-rw-r--r--  net/rxrpc/input.c | 770
-rw-r--r--  net/rxrpc/insecure.c | 16
-rw-r--r--  net/rxrpc/local_object.c | 20
-rw-r--r--  net/rxrpc/misc.c | 23
-rw-r--r--  net/rxrpc/net_ns.c | 2
-rw-r--r--  net/rxrpc/output.c | 390
-rw-r--r--  net/rxrpc/peer_event.c | 282
-rw-r--r--  net/rxrpc/peer_object.c | 7
-rw-r--r--  net/rxrpc/proc.c | 110
-rw-r--r--  net/rxrpc/protocol.h | 9
-rw-r--r--  net/rxrpc/recvmsg.c | 268
-rw-r--r--  net/rxrpc/rxkad.c | 245
-rw-r--r--  net/rxrpc/sendmsg.c | 218
-rw-r--r--  net/rxrpc/skbuff.c | 20
-rw-r--r--  net/rxrpc/sysctl.c | 11
-rw-r--r--  net/rxrpc/txbuf.c | 135
-rw-r--r--  net/sched/act_ct.c | 124
-rw-r--r--  net/sched/act_skbedit.c | 14
-rw-r--r--  net/sched/cls_api.c | 7
-rw-r--r--  net/sctp/associola.c | 4
-rw-r--r--  net/sctp/socket.c | 29
-rw-r--r--  net/sctp/stream_interleave.c | 12
-rw-r--r--  net/sctp/ulpqueue.c | 10
-rw-r--r--  net/socket.c | 8
-rw-r--r--  net/wireless/core.h | 5
-rw-r--r--  net/wireless/mlme.c | 4
-rw-r--r--  net/wireless/nl80211.c | 23
-rw-r--r--  net/wireless/nl80211.h | 3
-rw-r--r--  net/wireless/sme.c | 26
-rw-r--r--  net/wireless/util.c | 4
169 files changed, 5253 insertions, 3074 deletions
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index e1bb41a443c4..296d0145932f 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -712,13 +712,13 @@ static void vlan_dev_get_stats64(struct net_device *dev,
p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
do {
- start = u64_stats_fetch_begin_irq(&p->syncp);
+ start = u64_stats_fetch_begin(&p->syncp);
rxpackets = u64_stats_read(&p->rx_packets);
rxbytes = u64_stats_read(&p->rx_bytes);
rxmulticast = u64_stats_read(&p->rx_multicast);
txpackets = u64_stats_read(&p->tx_packets);
txbytes = u64_stats_read(&p->tx_bytes);
- } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+ } while (u64_stats_fetch_retry(&p->syncp, start));
stats->rx_packets += rxpackets;
stats->rx_bytes += rxbytes;
diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
index a5e4a4e976cf..ad5714f737be 100644
--- a/net/batman-adv/netlink.c
+++ b/net/batman-adv/netlink.c
@@ -1267,7 +1267,8 @@ batadv_get_vlan_from_info(struct batadv_priv *bat_priv, struct net *net,
*
* Return: 0 on success or negative error number in case of failure
*/
-static int batadv_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
+static int batadv_pre_doit(const struct genl_split_ops *ops,
+ struct sk_buff *skb,
struct genl_info *info)
{
struct net *net = genl_info_net(info);
@@ -1332,7 +1333,8 @@ err_put_softif:
* @skb: Netlink message with request data
* @info: receiver information
*/
-static void batadv_post_doit(const struct genl_ops *ops, struct sk_buff *skb,
+static void batadv_post_doit(const struct genl_split_ops *ops,
+ struct sk_buff *skb,
struct genl_info *info)
{
struct batadv_hard_iface *hard_iface;
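
The batman-adv change is purely mechanical: the generic netlink pre_doit/post_doit hooks now take a const struct genl_split_ops * instead of const struct genl_ops * (the same conversion shows up below in devlink.c and drop_monitor.c, with the bulk of the work in net/netlink/genetlink.c). A hedged sketch of what a family callback looks like after the change, with an illustrative body:

	static int example_pre_doit(const struct genl_split_ops *ops,
				    struct sk_buff *skb,
				    struct genl_info *info)
	{
		/* ops now describes the individual do/dump operation being run */
		return 0;
	}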
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 96e91d69a9a8..4f5098d33a46 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -166,13 +166,14 @@ static int br_switchdev_event(struct notifier_block *unused,
case SWITCHDEV_FDB_ADD_TO_BRIDGE:
fdb_info = ptr;
err = br_fdb_external_learn_add(br, p, fdb_info->addr,
- fdb_info->vid, false);
+ fdb_info->vid,
+ fdb_info->locked, false);
if (err) {
err = notifier_from_errno(err);
break;
}
br_fdb_offloaded_set(br, p, fdb_info->addr,
- fdb_info->vid, true);
+ fdb_info->vid, fdb_info->offloaded);
break;
case SWITCHDEV_FDB_DEL_TO_BRIDGE:
fdb_info = ptr;
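
br.c now forwards two new pieces of information from the switchdev FDB notification into the bridge: fdb_info->locked and fdb_info->offloaded. A hedged sketch of how a switchdev driver might report a MAC address seen on a locked port so that the bridge creates a locked FDB entry (driver-side variable names are illustrative):

	struct switchdev_notifier_fdb_info info = {
		.addr	= mac,		/* source MAC learned by hardware */
		.vid	= vid,
		.locked	= true,		/* new field consumed above */
	};

	call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, port_netdev,
				 &info.info, NULL);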
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index e7f4fccb6adb..e69a872bfc1d 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -105,6 +105,7 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
struct nda_cacheinfo ci;
struct nlmsghdr *nlh;
struct ndmsg *ndm;
+ u32 ext_flags = 0;
nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
if (nlh == NULL)
@@ -125,11 +126,16 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
ndm->ndm_flags |= NTF_EXT_LEARNED;
if (test_bit(BR_FDB_STICKY, &fdb->flags))
ndm->ndm_flags |= NTF_STICKY;
+ if (test_bit(BR_FDB_LOCKED, &fdb->flags))
+ ext_flags |= NTF_EXT_LOCKED;
if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.addr))
goto nla_put_failure;
if (nla_put_u32(skb, NDA_MASTER, br->dev->ifindex))
goto nla_put_failure;
+ if (nla_put_u32(skb, NDA_FLAGS_EXT, ext_flags))
+ goto nla_put_failure;
+
ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
ci.ndm_confirmed = 0;
ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
@@ -171,6 +177,7 @@ static inline size_t fdb_nlmsg_size(void)
return NLMSG_ALIGN(sizeof(struct ndmsg))
+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
+ nla_total_size(sizeof(u32)) /* NDA_MASTER */
+ + nla_total_size(sizeof(u32)) /* NDA_FLAGS_EXT */
+ nla_total_size(sizeof(u16)) /* NDA_VLAN */
+ nla_total_size(sizeof(struct nda_cacheinfo))
+ nla_total_size(0) /* NDA_FDB_EXT_ATTRS */
@@ -879,6 +886,11 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
&fdb->flags)))
clear_bit(BR_FDB_ADDED_BY_EXT_LEARN,
&fdb->flags);
+ /* Clear locked flag when roaming to an
+ * unlocked port.
+ */
+ if (unlikely(test_bit(BR_FDB_LOCKED, &fdb->flags)))
+ clear_bit(BR_FDB_LOCKED, &fdb->flags);
}
if (unlikely(test_bit(BR_FDB_ADDED_BY_USER, &flags)))
@@ -1082,6 +1094,9 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
modified = true;
}
+ if (test_and_clear_bit(BR_FDB_LOCKED, &fdb->flags))
+ modified = true;
+
if (fdb_handle_notify(fdb, notify))
modified = true;
@@ -1124,7 +1139,7 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
"FDB entry towards bridge must be permanent");
return -EINVAL;
}
- err = br_fdb_external_learn_add(br, p, addr, vid, true);
+ err = br_fdb_external_learn_add(br, p, addr, vid, false, true);
} else {
spin_lock_bh(&br->hash_lock);
err = fdb_add_entry(br, p, addr, ndm, nlh_flags, vid, nfea_tb);
@@ -1150,6 +1165,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_bridge_port *p = NULL;
struct net_bridge_vlan *v;
struct net_bridge *br = NULL;
+ u32 ext_flags = 0;
int err = 0;
trace_br_fdb_add(ndm, dev, addr, vid, nlh_flags);
@@ -1178,6 +1194,14 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
vg = nbp_vlan_group(p);
}
+ if (tb[NDA_FLAGS_EXT])
+ ext_flags = nla_get_u32(tb[NDA_FLAGS_EXT]);
+
+ if (ext_flags & NTF_EXT_LOCKED) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot add FDB entry with \"locked\" flag set");
+ return -EINVAL;
+ }
+
if (tb[NDA_FDB_EXT_ATTRS]) {
attr = tb[NDA_FDB_EXT_ATTRS];
err = nla_parse_nested(nfea_tb, NFEA_MAX, attr,
@@ -1353,7 +1377,7 @@ void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
}
int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
- const unsigned char *addr, u16 vid,
+ const unsigned char *addr, u16 vid, bool locked,
bool swdev_notify)
{
struct net_bridge_fdb_entry *fdb;
@@ -1362,6 +1386,9 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
trace_br_fdb_external_learn_add(br, p, addr, vid);
+ if (locked && (!p || !(p->flags & BR_PORT_MAB)))
+ return -EINVAL;
+
spin_lock_bh(&br->hash_lock);
fdb = br_fdb_find(br, addr, vid);
@@ -1374,6 +1401,9 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
if (!p)
flags |= BIT(BR_FDB_LOCAL);
+ if (locked)
+ flags |= BIT(BR_FDB_LOCKED);
+
fdb = fdb_create(br, p, addr, vid, flags);
if (!fdb) {
err = -ENOMEM;
@@ -1381,6 +1411,13 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
}
fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
} else {
+ if (locked &&
+ (!test_bit(BR_FDB_LOCKED, &fdb->flags) ||
+ READ_ONCE(fdb->dst) != p)) {
+ err = -EINVAL;
+ goto err_unlock;
+ }
+
fdb->updated = jiffies;
if (READ_ONCE(fdb->dst) != p) {
@@ -1397,6 +1434,11 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
modified = true;
}
+ if (locked != test_bit(BR_FDB_LOCKED, &fdb->flags)) {
+ change_bit(BR_FDB_LOCKED, &fdb->flags);
+ modified = true;
+ }
+
if (swdev_notify)
set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 68b3e850bcb9..d04d2205ad4e 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -109,9 +109,26 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
struct net_bridge_fdb_entry *fdb_src =
br_fdb_find_rcu(br, eth_hdr(skb)->h_source, vid);
- if (!fdb_src || READ_ONCE(fdb_src->dst) != p ||
- test_bit(BR_FDB_LOCAL, &fdb_src->flags))
+ if (!fdb_src) {
+ /* FDB miss. Create locked FDB entry if MAB is enabled
+ * and drop the packet.
+ */
+ if (p->flags & BR_PORT_MAB)
+ br_fdb_update(br, p, eth_hdr(skb)->h_source,
+ vid, BIT(BR_FDB_LOCKED));
goto drop;
+ } else if (READ_ONCE(fdb_src->dst) != p ||
+ test_bit(BR_FDB_LOCAL, &fdb_src->flags)) {
+ /* FDB mismatch. Drop the packet without roaming. */
+ goto drop;
+ } else if (test_bit(BR_FDB_LOCKED, &fdb_src->flags)) {
+ /* FDB match, but entry is locked. Refresh it and drop
+ * the packet.
+ */
+ br_fdb_update(br, p, eth_hdr(skb)->h_source, vid,
+ BIT(BR_FDB_LOCKED));
+ goto drop;
+ }
}
nbp_switchdev_frame_mark(p, skb);
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 589ff497d50c..321be94c445a 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -866,7 +866,6 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
unsigned long now = jiffies;
unsigned char flags = 0;
u8 filter_mode;
- int err;
__mdb_entry_to_br_ip(entry, &group, mdb_attrs);
@@ -892,13 +891,9 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
return -EINVAL;
}
- mp = br_mdb_ip_get(br, &group);
- if (!mp) {
- mp = br_multicast_new_group(br, &group);
- err = PTR_ERR_OR_ZERO(mp);
- if (err)
- return err;
- }
+ mp = br_multicast_new_group(br, &group);
+ if (IS_ERR(mp))
+ return PTR_ERR(mp);
/* host join */
if (!port) {
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index db4f2641d1cd..5e988f0ed2c0 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -2669,7 +2669,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge_mcast *brmctx,
if (!pmctx || igmpv2)
continue;
- spin_lock_bh(&brmctx->br->multicast_lock);
+ spin_lock(&brmctx->br->multicast_lock);
if (!br_multicast_ctx_should_use(brmctx, pmctx))
goto unlock_continue;
@@ -2717,7 +2717,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge_mcast *brmctx,
if (changed)
br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
- spin_unlock_bh(&brmctx->br->multicast_lock);
+ spin_unlock(&brmctx->br->multicast_lock);
}
return err;
@@ -2807,7 +2807,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge_mcast *brmctx,
if (!pmctx || mldv1)
continue;
- spin_lock_bh(&brmctx->br->multicast_lock);
+ spin_lock(&brmctx->br->multicast_lock);
if (!br_multicast_ctx_should_use(brmctx, pmctx))
goto unlock_continue;
@@ -2859,7 +2859,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge_mcast *brmctx,
if (changed)
br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
- spin_unlock_bh(&brmctx->br->multicast_lock);
+ spin_unlock(&brmctx->br->multicast_lock);
}
return err;
@@ -4899,9 +4899,9 @@ void br_multicast_get_stats(const struct net_bridge *br,
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
- } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index d087fd4c784a..4316cc82ae17 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -188,6 +188,7 @@ static inline size_t br_port_info_size(void)
+ nla_total_size(1) /* IFLA_BRPORT_NEIGH_SUPPRESS */
+ nla_total_size(1) /* IFLA_BRPORT_ISOLATED */
+ nla_total_size(1) /* IFLA_BRPORT_LOCKED */
+ + nla_total_size(1) /* IFLA_BRPORT_MAB */
+ nla_total_size(sizeof(struct ifla_bridge_id)) /* IFLA_BRPORT_ROOT_ID */
+ nla_total_size(sizeof(struct ifla_bridge_id)) /* IFLA_BRPORT_BRIDGE_ID */
+ nla_total_size(sizeof(u16)) /* IFLA_BRPORT_DESIGNATED_PORT */
@@ -274,7 +275,8 @@ static int br_port_fill_attrs(struct sk_buff *skb,
nla_put_u8(skb, IFLA_BRPORT_MRP_IN_OPEN,
!!(p->flags & BR_MRP_LOST_IN_CONT)) ||
nla_put_u8(skb, IFLA_BRPORT_ISOLATED, !!(p->flags & BR_ISOLATED)) ||
- nla_put_u8(skb, IFLA_BRPORT_LOCKED, !!(p->flags & BR_PORT_LOCKED)))
+ nla_put_u8(skb, IFLA_BRPORT_LOCKED, !!(p->flags & BR_PORT_LOCKED)) ||
+ nla_put_u8(skb, IFLA_BRPORT_MAB, !!(p->flags & BR_PORT_MAB)))
return -EMSGSIZE;
timerval = br_timer_value(&p->message_age_timer);
@@ -876,6 +878,7 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
[IFLA_BRPORT_NEIGH_SUPPRESS] = { .type = NLA_U8 },
[IFLA_BRPORT_ISOLATED] = { .type = NLA_U8 },
[IFLA_BRPORT_LOCKED] = { .type = NLA_U8 },
+ [IFLA_BRPORT_MAB] = { .type = NLA_U8 },
[IFLA_BRPORT_BACKUP_PORT] = { .type = NLA_U32 },
[IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT] = { .type = NLA_U32 },
};
@@ -943,6 +946,22 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[],
br_set_port_flag(p, tb, IFLA_BRPORT_NEIGH_SUPPRESS, BR_NEIGH_SUPPRESS);
br_set_port_flag(p, tb, IFLA_BRPORT_ISOLATED, BR_ISOLATED);
br_set_port_flag(p, tb, IFLA_BRPORT_LOCKED, BR_PORT_LOCKED);
+ br_set_port_flag(p, tb, IFLA_BRPORT_MAB, BR_PORT_MAB);
+
+ if ((p->flags & BR_PORT_MAB) &&
+ (!(p->flags & BR_PORT_LOCKED) || !(p->flags & BR_LEARNING))) {
+ NL_SET_ERR_MSG(extack, "Bridge port must be locked and have learning enabled when MAB is enabled");
+ p->flags = old_flags;
+ return -EINVAL;
+ } else if (!(p->flags & BR_PORT_MAB) && (old_flags & BR_PORT_MAB)) {
+ struct net_bridge_fdb_flush_desc desc = {
+ .flags = BIT(BR_FDB_LOCKED),
+ .flags_mask = BIT(BR_FDB_LOCKED),
+ .port_ifindex = p->dev->ifindex,
+ };
+
+ br_fdb_flush(p->br, &desc);
+ }
changed_mask = old_flags ^ p->flags;
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 06e5f6faa431..4c4fda930068 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -251,7 +251,8 @@ enum {
BR_FDB_ADDED_BY_EXT_LEARN,
BR_FDB_OFFLOADED,
BR_FDB_NOTIFY,
- BR_FDB_NOTIFY_INACTIVE
+ BR_FDB_NOTIFY_INACTIVE,
+ BR_FDB_LOCKED,
};
struct net_bridge_fdb_key {
@@ -810,7 +811,7 @@ int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p);
void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p);
int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
const unsigned char *addr, u16 vid,
- bool swdev_notify);
+ bool locked, bool swdev_notify);
int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
const unsigned char *addr, u16 vid,
bool swdev_notify);
diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
index 8f3d76c751dd..7eb6fd5bb917 100644
--- a/net/bridge/br_switchdev.c
+++ b/net/bridge/br_switchdev.c
@@ -71,7 +71,7 @@ bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
}
/* Flags that can be offloaded to hardware */
-#define BR_PORT_FLAGS_HW_OFFLOAD (BR_LEARNING | BR_FLOOD | \
+#define BR_PORT_FLAGS_HW_OFFLOAD (BR_LEARNING | BR_FLOOD | BR_PORT_MAB | \
BR_MCAST_FLOOD | BR_BCAST_FLOOD | BR_PORT_LOCKED | \
BR_HAIRPIN_MODE | BR_ISOLATED | BR_MULTICAST_TO_UNICAST)
@@ -136,6 +136,7 @@ static void br_switchdev_fdb_populate(struct net_bridge *br,
item->added_by_user = test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
item->offloaded = test_bit(BR_FDB_OFFLOADED, &fdb->flags);
item->is_local = test_bit(BR_FDB_LOCAL, &fdb->flags);
+ item->locked = false;
item->info.dev = (!p || item->is_local) ? br->dev : p->dev;
item->info.ctx = ctx;
}
@@ -146,6 +147,9 @@ br_switchdev_fdb_notify(struct net_bridge *br,
{
struct switchdev_notifier_fdb_info item;
+ if (test_bit(BR_FDB_LOCKED, &fdb->flags))
+ return;
+
br_switchdev_fdb_populate(br, &item, fdb, NULL);
switch (type) {
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 6e53dc991409..f2fc284abab3 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -1378,12 +1378,12 @@ void br_vlan_get_stats(const struct net_bridge_vlan *v,
cpu_stats = per_cpu_ptr(v->stats, i);
do {
- start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
rxpackets = u64_stats_read(&cpu_stats->rx_packets);
rxbytes = u64_stats_read(&cpu_stats->rx_bytes);
txbytes = u64_stats_read(&cpu_stats->tx_bytes);
txpackets = u64_stats_read(&cpu_stats->tx_packets);
- } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
u64_stats_add(&stats->rx_packets, rxpackets);
u64_stats_add(&stats->rx_bytes, rxbytes);
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
index 55f29c9f9e08..f26f4cfa9e63 100644
--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -987,7 +987,7 @@ static int j1939_session_tx_eoma(struct j1939_session *session)
/* wait for the EOMA packet to come in */
j1939_tp_set_rxtimeout(session, 1250);
- netdev_dbg(session->priv->ndev, "%p: 0x%p\n", __func__, session);
+ netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
return 0;
}
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index 94374d529ea4..49884e7de080 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -48,10 +48,8 @@ static int bpf_sk_storage_del(struct sock *sk, struct bpf_map *map)
/* Called by __sk_destruct() & bpf_sk_storage_clone() */
void bpf_sk_storage_free(struct sock *sk)
{
- struct bpf_local_storage_elem *selem;
struct bpf_local_storage *sk_storage;
bool free_sk_storage = false;
- struct hlist_node *n;
rcu_read_lock();
sk_storage = rcu_dereference(sk->sk_bpf_storage);
@@ -60,24 +58,8 @@ void bpf_sk_storage_free(struct sock *sk)
return;
}
- /* Netiher the bpf_prog nor the bpf-map's syscall
- * could be modifying the sk_storage->list now.
- * Thus, no elem can be added-to or deleted-from the
- * sk_storage->list by the bpf_prog or by the bpf-map's syscall.
- *
- * It is racing with bpf_local_storage_map_free() alone
- * when unlinking elem from the sk_storage->list and
- * the map's bucket->list.
- */
raw_spin_lock_bh(&sk_storage->lock);
- hlist_for_each_entry_safe(selem, n, &sk_storage->list, snode) {
- /* Always unlink from map before unlinking from
- * sk_storage.
- */
- bpf_selem_unlink_map(selem);
- free_sk_storage = bpf_selem_unlink_storage_nolock(
- sk_storage, selem, true, false);
- }
+ free_sk_storage = bpf_local_storage_unlink_nolock(sk_storage);
raw_spin_unlock_bh(&sk_storage->lock);
rcu_read_unlock();
@@ -87,23 +69,12 @@ void bpf_sk_storage_free(struct sock *sk)
static void bpf_sk_storage_map_free(struct bpf_map *map)
{
- struct bpf_local_storage_map *smap;
-
- smap = (struct bpf_local_storage_map *)map;
- bpf_local_storage_cache_idx_free(&sk_cache, smap->cache_idx);
- bpf_local_storage_map_free(smap, NULL);
+ bpf_local_storage_map_free(map, &sk_cache, NULL);
}
static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
{
- struct bpf_local_storage_map *smap;
-
- smap = bpf_local_storage_map_alloc(attr);
- if (IS_ERR(smap))
- return ERR_CAST(smap);
-
- smap->cache_idx = bpf_local_storage_cache_idx_get(&sk_cache);
- return &smap->map;
+ return bpf_local_storage_map_alloc(attr, &sk_cache);
}
static int notsupp_get_next_key(struct bpf_map *map, void *key,
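
The sk_storage-specific alloc/free glue collapses into the shared bpf_local_storage helpers: the cache index bookkeeping moves behind bpf_local_storage_map_alloc()/bpf_local_storage_map_free(), and the element unlink loop behind bpf_local_storage_unlink_nolock(). A hedged sketch of how another local-storage user would presumably wire up the same helpers after this refactor ("example_cache" is illustrative):

	static DEFINE_BPF_STORAGE_CACHE(example_cache);

	static struct bpf_map *example_storage_map_alloc(union bpf_attr *attr)
	{
		return bpf_local_storage_map_alloc(attr, &example_cache);
	}

	static void example_storage_map_free(struct bpf_map *map)
	{
		bpf_local_storage_map_free(map, &example_cache, NULL);
	}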
diff --git a/net/core/dev.c b/net/core/dev.c
index 3be256051e99..117e830cabb0 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1163,22 +1163,6 @@ int dev_change_name(struct net_device *dev, const char *newname)
net = dev_net(dev);
- /* Some auto-enslaved devices e.g. failover slaves are
- * special, as userspace might rename the device after
- * the interface had been brought up and running since
- * the point kernel initiated auto-enslavement. Allow
- * live name change even when these slave devices are
- * up and running.
- *
- * Typically, users of these auto-enslaving devices
- * don't actually care about slave name change, as
- * they are supposed to operate on master interface
- * directly.
- */
- if (dev->flags & IFF_UP &&
- likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
- return -EBUSY;
-
down_write(&devnet_rename_sem);
if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
@@ -1195,7 +1179,8 @@ int dev_change_name(struct net_device *dev, const char *newname)
}
if (oldname[0] && !strchr(oldname, '%'))
- netdev_info(dev, "renamed from %s\n", oldname);
+ netdev_info(dev, "renamed from %s%s\n", oldname,
+ dev->flags & IFF_UP ? " (while UP)" : "");
old_assign_type = dev->name_assign_type;
dev->name_assign_type = NET_NAME_RENAMED;
@@ -1333,7 +1318,7 @@ void netdev_state_change(struct net_device *dev)
call_netdevice_notifiers_info(NETDEV_CHANGE,
&change_info.info);
- rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
+ rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL, 0, NULL);
}
}
EXPORT_SYMBOL(netdev_state_change);
@@ -1469,7 +1454,7 @@ int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
if (ret < 0)
return ret;
- rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
+ rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL);
call_netdevice_notifiers(NETDEV_UP, dev);
return ret;
@@ -1541,7 +1526,7 @@ void dev_close_many(struct list_head *head, bool unlink)
__dev_close_many(head);
list_for_each_entry_safe(dev, tmp, head, close_list) {
- rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
+ rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL);
call_netdevice_notifiers(NETDEV_DOWN, dev);
if (unlink)
list_del_init(&dev->close_list);
@@ -1621,10 +1606,10 @@ const char *netdev_cmd_to_name(enum netdev_cmd cmd)
N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
- N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER)
- N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO)
- N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO)
- N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
+ N(POST_INIT) N(PRE_UNINIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN)
+ N(CHANGEUPPER) N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA)
+ N(BONDING_INFO) N(PRECHANGEUPPER) N(CHANGELOWERSTATE)
+ N(UDP_TUNNEL_PUSH_INFO) N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
N(PRE_CHANGEADDR) N(OFFLOAD_XSTATS_ENABLE) N(OFFLOAD_XSTATS_DISABLE)
@@ -1876,6 +1861,22 @@ int unregister_netdevice_notifier_net(struct net *net,
}
EXPORT_SYMBOL(unregister_netdevice_notifier_net);
+static void __move_netdevice_notifier_net(struct net *src_net,
+ struct net *dst_net,
+ struct notifier_block *nb)
+{
+ __unregister_netdevice_notifier_net(src_net, nb);
+ __register_netdevice_notifier_net(dst_net, nb, true);
+}
+
+void move_netdevice_notifier_net(struct net *src_net, struct net *dst_net,
+ struct notifier_block *nb)
+{
+ rtnl_lock();
+ __move_netdevice_notifier_net(src_net, dst_net, nb);
+ rtnl_unlock();
+}
+
int register_netdevice_notifier_dev_net(struct net_device *dev,
struct notifier_block *nb,
struct netdev_net_notifier *nn)
@@ -1912,10 +1913,8 @@ static void move_netdevice_notifiers_dev_net(struct net_device *dev,
{
struct netdev_net_notifier *nn;
- list_for_each_entry(nn, &dev->net_notifier_list, list) {
- __unregister_netdevice_notifier_net(dev_net(dev), nn->nb);
- __register_netdevice_notifier_net(net, nn->nb, true);
- }
+ list_for_each_entry(nn, &dev->net_notifier_list, list)
+ __move_netdevice_notifier_net(dev_net(dev), net, nn->nb);
}
/**
@@ -8351,7 +8350,7 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
dev_change_rx_flags(dev, IFF_PROMISC);
}
if (notify)
- __dev_notify_flags(dev, old_flags, IFF_PROMISC);
+ __dev_notify_flags(dev, old_flags, IFF_PROMISC, 0, NULL);
return 0;
}
@@ -8406,7 +8405,7 @@ static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
dev_set_rx_mode(dev);
if (notify)
__dev_notify_flags(dev, old_flags,
- dev->gflags ^ old_gflags);
+ dev->gflags ^ old_gflags, 0, NULL);
}
return 0;
}
@@ -8569,12 +8568,13 @@ int __dev_change_flags(struct net_device *dev, unsigned int flags,
}
void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
- unsigned int gchanges)
+ unsigned int gchanges, u32 portid,
+ const struct nlmsghdr *nlh)
{
unsigned int changes = dev->flags ^ old_flags;
if (gchanges)
- rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
+ rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC, portid, nlh);
if (changes & IFF_UP) {
if (dev->flags & IFF_UP)
@@ -8616,7 +8616,7 @@ int dev_change_flags(struct net_device *dev, unsigned int flags,
return ret;
changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
- __dev_notify_flags(dev, old_flags, changes);
+ __dev_notify_flags(dev, old_flags, changes, 0, NULL);
return ret;
}
EXPORT_SYMBOL(dev_change_flags);
@@ -8822,7 +8822,7 @@ EXPORT_SYMBOL(dev_set_mac_address_user);
int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name)
{
- size_t size = sizeof(sa->sa_data);
+ size_t size = sizeof(sa->sa_data_min);
struct net_device *dev;
int ret = 0;
@@ -10059,7 +10059,7 @@ int register_netdevice(struct net_device *dev)
dev->reg_state = ret ? NETREG_UNREGISTERED : NETREG_REGISTERED;
write_unlock(&dev_base_lock);
if (ret)
- goto err_uninit;
+ goto err_uninit_notify;
__netdev_update_features(dev);
@@ -10101,11 +10101,13 @@ int register_netdevice(struct net_device *dev)
*/
if (!dev->rtnl_link_ops ||
dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
- rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
+ rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL);
out:
return ret;
+err_uninit_notify:
+ call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
err_uninit:
if (dev->netdev_ops->ndo_uninit)
dev->netdev_ops->ndo_uninit(dev);
@@ -10477,12 +10479,12 @@ void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
stats = per_cpu_ptr(netstats, cpu);
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
+ start = u64_stats_fetch_begin(&stats->syncp);
rx_packets = u64_stats_read(&stats->rx_packets);
rx_bytes = u64_stats_read(&stats->rx_bytes);
tx_packets = u64_stats_read(&stats->tx_packets);
tx_bytes = u64_stats_read(&stats->tx_bytes);
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
s->rx_packets += rx_packets;
s->rx_bytes += rx_bytes;
@@ -10780,14 +10782,8 @@ void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
}
EXPORT_SYMBOL(unregister_netdevice_queue);
-/**
- * unregister_netdevice_many - unregister many devices
- * @head: list of devices
- *
- * Note: As most callers use a stack allocated list_head,
- * we force a list_del() to make sure stack wont be corrupted later.
- */
-void unregister_netdevice_many(struct list_head *head)
+void unregister_netdevice_many_notify(struct list_head *head,
+ u32 portid, const struct nlmsghdr *nlh)
{
struct net_device *dev, *tmp;
LIST_HEAD(close_head);
@@ -10849,7 +10845,8 @@ void unregister_netdevice_many(struct list_head *head)
if (!dev->rtnl_link_ops ||
dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
- GFP_KERNEL, NULL, 0);
+ GFP_KERNEL, NULL, 0,
+ portid, nlmsg_seq(nlh));
/*
* Flush the unicast and multicast chains
@@ -10860,11 +10857,13 @@ void unregister_netdevice_many(struct list_head *head)
netdev_name_node_alt_flush(dev);
netdev_name_node_free(dev->name_node);
+ call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
+
if (dev->netdev_ops->ndo_uninit)
dev->netdev_ops->ndo_uninit(dev);
if (skb)
- rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
+ rtmsg_ifinfo_send(skb, dev, GFP_KERNEL, portid, nlh);
/* Notifier chain MUST detach us all upper devices. */
WARN_ON(netdev_has_any_upper_dev(dev));
@@ -10887,6 +10886,18 @@ void unregister_netdevice_many(struct list_head *head)
list_del(head);
}
+
+/**
+ * unregister_netdevice_many - unregister many devices
+ * @head: list of devices
+ *
+ * Note: As most callers use a stack allocated list_head,
+ * we force a list_del() to make sure stack wont be corrupted later.
+ */
+void unregister_netdevice_many(struct list_head *head)
+{
+ unregister_netdevice_many_notify(head, 0, NULL);
+}
EXPORT_SYMBOL(unregister_netdevice_many);
/**
@@ -11042,7 +11053,7 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net,
* Prevent userspace races by waiting until the network
* device is fully setup before sending notifications.
*/
- rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
+ rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL);
synchronize_net();
err = 0;
diff --git a/net/core/dev.h b/net/core/dev.h
index cbb8a925175a..814ed5b7b960 100644
--- a/net/core/dev.h
+++ b/net/core/dev.h
@@ -88,6 +88,13 @@ int dev_change_carrier(struct net_device *dev, bool new_carrier);
void __dev_set_rx_mode(struct net_device *dev);
+void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
+ unsigned int gchanges, u32 portid,
+ const struct nlmsghdr *nlh);
+
+void unregister_netdevice_many_notify(struct list_head *head,
+ u32 portid, const struct nlmsghdr *nlh);
+
static inline void netif_set_gso_max_size(struct net_device *dev,
unsigned int size)
{
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 7674bb9f3076..5cdbfbf9a7dc 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -342,7 +342,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, void __user *data,
if (ifr->ifr_hwaddr.sa_family != dev->type)
return -EINVAL;
memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
- min(sizeof(ifr->ifr_hwaddr.sa_data),
+ min(sizeof(ifr->ifr_hwaddr.sa_data_min),
(size_t)dev->addr_len));
call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
return 0;
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 89baa7c0938b..7f789bbcbbd7 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -71,6 +71,7 @@ struct devlink {
refcount_t refcount;
struct completion comp;
struct rcu_head rcu;
+ struct notifier_block netdevice_nb;
char priv[] __aligned(NETDEV_ALIGN);
};
@@ -769,7 +770,7 @@ devlink_region_snapshot_get_by_id(struct devlink_region *region, u32 id)
#define DEVLINK_NL_FLAG_NEED_RATE_NODE BIT(3)
#define DEVLINK_NL_FLAG_NEED_LINECARD BIT(4)
-static int devlink_nl_pre_doit(const struct genl_ops *ops,
+static int devlink_nl_pre_doit(const struct genl_split_ops *ops,
struct sk_buff *skb, struct genl_info *info)
{
struct devlink_linecard *linecard;
@@ -827,7 +828,7 @@ unlock:
return err;
}
-static void devlink_nl_post_doit(const struct genl_ops *ops,
+static void devlink_nl_post_doit(const struct genl_split_ops *ops,
struct sk_buff *skb, struct genl_info *info)
{
struct devlink_linecard *linecard;
@@ -879,6 +880,24 @@ nla_put_failure:
return -EMSGSIZE;
}
+int devlink_nl_port_handle_fill(struct sk_buff *msg, struct devlink_port *devlink_port)
+{
+ if (devlink_nl_put_handle(msg, devlink_port->devlink))
+ return -EMSGSIZE;
+ if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index))
+ return -EMSGSIZE;
+ return 0;
+}
+
+size_t devlink_nl_port_handle_size(struct devlink_port *devlink_port)
+{
+ struct devlink *devlink = devlink_port->devlink;
+
+ return nla_total_size(strlen(devlink->dev->bus->name) + 1) /* DEVLINK_ATTR_BUS_NAME */
+ + nla_total_size(strlen(dev_name(devlink->dev)) + 1) /* DEVLINK_ATTR_DEV_NAME */
+ + nla_total_size(4); /* DEVLINK_ATTR_PORT_INDEX */
+}
+
struct devlink_reload_combination {
enum devlink_reload_action action;
enum devlink_reload_limit limit;
@@ -1292,8 +1311,6 @@ static int devlink_nl_port_fill(struct sk_buff *msg,
if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index))
goto nla_put_failure;
- /* Hold rtnl lock while accessing port's netdev attributes. */
- rtnl_lock();
spin_lock_bh(&devlink_port->type_lock);
if (nla_put_u16(msg, DEVLINK_ATTR_PORT_TYPE, devlink_port->type))
goto nla_put_failure_type_locked;
@@ -1302,18 +1319,15 @@ static int devlink_nl_port_fill(struct sk_buff *msg,
devlink_port->desired_type))
goto nla_put_failure_type_locked;
if (devlink_port->type == DEVLINK_PORT_TYPE_ETH) {
- struct net *net = devlink_net(devlink_port->devlink);
- struct net_device *netdev = devlink_port->type_dev;
-
- if (netdev && net_eq(net, dev_net(netdev)) &&
+ if (devlink_port->type_eth.netdev &&
(nla_put_u32(msg, DEVLINK_ATTR_PORT_NETDEV_IFINDEX,
- netdev->ifindex) ||
+ devlink_port->type_eth.ifindex) ||
nla_put_string(msg, DEVLINK_ATTR_PORT_NETDEV_NAME,
- netdev->name)))
+ devlink_port->type_eth.ifname)))
goto nla_put_failure_type_locked;
}
if (devlink_port->type == DEVLINK_PORT_TYPE_IB) {
- struct ib_device *ibdev = devlink_port->type_dev;
+ struct ib_device *ibdev = devlink_port->type_ib.ibdev;
if (ibdev &&
nla_put_string(msg, DEVLINK_ATTR_PORT_IBDEV_NAME,
@@ -1321,7 +1335,6 @@ static int devlink_nl_port_fill(struct sk_buff *msg,
goto nla_put_failure_type_locked;
}
spin_unlock_bh(&devlink_port->type_lock);
- rtnl_unlock();
if (devlink_nl_port_attrs_put(msg, devlink_port))
goto nla_put_failure;
if (devlink_nl_port_function_attrs_put(msg, devlink_port, extack))
@@ -1336,7 +1349,6 @@ static int devlink_nl_port_fill(struct sk_buff *msg,
nla_put_failure_type_locked:
spin_unlock_bh(&devlink_port->type_lock);
- rtnl_unlock();
nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
@@ -4490,8 +4502,11 @@ static int devlink_reload(struct devlink *devlink, struct net *dest_net,
if (err)
return err;
- if (dest_net && !net_eq(dest_net, curr_net))
+ if (dest_net && !net_eq(dest_net, curr_net)) {
+ move_netdevice_notifier_net(curr_net, dest_net,
+ &devlink->netdevice_nb);
write_pnet(&devlink->_net, dest_net);
+ }
err = devlink->ops->reload_up(devlink, action, limit, actions_performed, extack);
devlink_reload_failed_set(devlink, !!err);
@@ -8304,10 +8319,10 @@ static void devlink_trap_stats_read(struct devlink_stats __percpu *trap_stats,
cpu_stats = per_cpu_ptr(trap_stats, i);
do {
- start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
rx_packets = u64_stats_read(&cpu_stats->rx_packets);
rx_bytes = u64_stats_read(&cpu_stats->rx_bytes);
- } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
u64_stats_add(&stats->rx_packets, rx_packets);
u64_stats_add(&stats->rx_bytes, rx_bytes);
@@ -9602,6 +9617,9 @@ void devlink_set_features(struct devlink *devlink, u64 features)
}
EXPORT_SYMBOL_GPL(devlink_set_features);
+static int devlink_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr);
+
/**
* devlink_alloc_ns - Allocate new devlink instance resources
* in specific namespace
@@ -9632,10 +9650,13 @@ struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
ret = xa_alloc_cyclic(&devlinks, &devlink->index, devlink, xa_limit_31b,
&last_id, GFP_KERNEL);
- if (ret < 0) {
- kfree(devlink);
- return NULL;
- }
+ if (ret < 0)
+ goto err_xa_alloc;
+
+ devlink->netdevice_nb.notifier_call = devlink_netdevice_event;
+ ret = register_netdevice_notifier_net(net, &devlink->netdevice_nb);
+ if (ret)
+ goto err_register_netdevice_notifier;
devlink->dev = dev;
devlink->ops = ops;
@@ -9662,6 +9683,12 @@ struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
init_completion(&devlink->comp);
return devlink;
+
+err_register_netdevice_notifier:
+ xa_erase(&devlinks, devlink->index);
+err_xa_alloc:
+ kfree(devlink);
+ return NULL;
}
EXPORT_SYMBOL_GPL(devlink_alloc_ns);
@@ -9815,6 +9842,10 @@ void devlink_free(struct devlink *devlink)
WARN_ON(!list_empty(&devlink->port_list));
xa_destroy(&devlink->snapshot_ids);
+
+ unregister_netdevice_notifier_net(devlink_net(devlink),
+ &devlink->netdevice_nb);
+
xa_erase(&devlinks, devlink->index);
kfree(devlink);
@@ -9967,6 +9998,7 @@ EXPORT_SYMBOL_GPL(devlink_port_register);
void devl_port_unregister(struct devlink_port *devlink_port)
{
lockdep_assert_held(&devlink_port->devlink->lock);
+ WARN_ON(devlink_port->type != DEVLINK_PORT_TYPE_NOTSET);
devlink_port_type_warn_cancel(devlink_port);
devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL);
@@ -9994,20 +10026,6 @@ void devlink_port_unregister(struct devlink_port *devlink_port)
}
EXPORT_SYMBOL_GPL(devlink_port_unregister);
-static void __devlink_port_type_set(struct devlink_port *devlink_port,
- enum devlink_port_type type,
- void *type_dev)
-{
- ASSERT_DEVLINK_PORT_REGISTERED(devlink_port);
-
- devlink_port_type_warn_cancel(devlink_port);
- spin_lock_bh(&devlink_port->type_lock);
- devlink_port->type = type;
- devlink_port->type_dev = type_dev;
- spin_unlock_bh(&devlink_port->type_lock);
- devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
-}
-
static void devlink_port_type_netdev_checks(struct devlink_port *devlink_port,
struct net_device *netdev)
{
@@ -10045,23 +10063,58 @@ static void devlink_port_type_netdev_checks(struct devlink_port *devlink_port,
}
}
+static void __devlink_port_type_set(struct devlink_port *devlink_port,
+ enum devlink_port_type type,
+ void *type_dev)
+{
+ struct net_device *netdev = type_dev;
+
+ ASSERT_DEVLINK_PORT_REGISTERED(devlink_port);
+
+ if (type == DEVLINK_PORT_TYPE_NOTSET) {
+ devlink_port_type_warn_schedule(devlink_port);
+ } else {
+ devlink_port_type_warn_cancel(devlink_port);
+ if (type == DEVLINK_PORT_TYPE_ETH && netdev)
+ devlink_port_type_netdev_checks(devlink_port, netdev);
+ }
+
+ spin_lock_bh(&devlink_port->type_lock);
+ devlink_port->type = type;
+ switch (type) {
+ case DEVLINK_PORT_TYPE_ETH:
+ devlink_port->type_eth.netdev = netdev;
+ if (netdev) {
+ ASSERT_RTNL();
+ devlink_port->type_eth.ifindex = netdev->ifindex;
+ BUILD_BUG_ON(sizeof(devlink_port->type_eth.ifname) !=
+ sizeof(netdev->name));
+ strcpy(devlink_port->type_eth.ifname, netdev->name);
+ }
+ break;
+ case DEVLINK_PORT_TYPE_IB:
+ devlink_port->type_ib.ibdev = type_dev;
+ break;
+ default:
+ break;
+ }
+ spin_unlock_bh(&devlink_port->type_lock);
+ devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
+}
+
/**
* devlink_port_type_eth_set - Set port type to Ethernet
*
* @devlink_port: devlink port
- * @netdev: related netdevice
+ *
+ * If driver is calling this, most likely it is doing something wrong.
*/
-void devlink_port_type_eth_set(struct devlink_port *devlink_port,
- struct net_device *netdev)
+void devlink_port_type_eth_set(struct devlink_port *devlink_port)
{
- if (netdev)
- devlink_port_type_netdev_checks(devlink_port, netdev);
- else
- dev_warn(devlink_port->devlink->dev,
- "devlink port type for port %d set to Ethernet without a software interface reference, device type not supported by the kernel?\n",
- devlink_port->index);
-
- __devlink_port_type_set(devlink_port, DEVLINK_PORT_TYPE_ETH, netdev);
+ dev_warn(devlink_port->devlink->dev,
+ "devlink port type for port %d set to Ethernet without a software interface reference, device type not supported by the kernel?\n",
+ devlink_port->index);
+ __devlink_port_type_set(devlink_port, DEVLINK_PORT_TYPE_ETH, NULL);
}
EXPORT_SYMBOL_GPL(devlink_port_type_eth_set);
@@ -10082,14 +10135,71 @@ EXPORT_SYMBOL_GPL(devlink_port_type_ib_set);
* devlink_port_type_clear - Clear port type
*
* @devlink_port: devlink port
+ *
+ * If driver is calling this for clearing Ethernet type, most likely
+ * it is doing something wrong.
*/
void devlink_port_type_clear(struct devlink_port *devlink_port)
{
+ if (devlink_port->type == DEVLINK_PORT_TYPE_ETH)
+ dev_warn(devlink_port->devlink->dev,
+ "devlink port type for port %d cleared without a software interface reference, device type not supported by the kernel?\n",
+ devlink_port->index);
__devlink_port_type_set(devlink_port, DEVLINK_PORT_TYPE_NOTSET, NULL);
- devlink_port_type_warn_schedule(devlink_port);
}
EXPORT_SYMBOL_GPL(devlink_port_type_clear);
+static int devlink_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+ struct devlink_port *devlink_port = netdev->devlink_port;
+ struct devlink *devlink;
+
+ devlink = container_of(nb, struct devlink, netdevice_nb);
+
+ if (!devlink_port || devlink_port->devlink != devlink)
+ return NOTIFY_OK;
+
+ switch (event) {
+ case NETDEV_POST_INIT:
+ /* Set the type but not netdev pointer. It is going to be set
+ * later on by NETDEV_REGISTER event. Happens once during
+ * netdevice register
+ */
+ __devlink_port_type_set(devlink_port, DEVLINK_PORT_TYPE_ETH,
+ NULL);
+ break;
+ case NETDEV_REGISTER:
+ case NETDEV_CHANGENAME:
+ /* Set the netdev on top of previously set type. Note this
+ * event happens also during net namespace change so here
+ * we take into account netdev pointer appearing in this
+ * namespace.
+ */
+ __devlink_port_type_set(devlink_port, devlink_port->type,
+ netdev);
+ break;
+ case NETDEV_UNREGISTER:
+ /* Clear netdev pointer, but not the type. This event happens
+ * also during net namespace change so we need to clear
+ * pointer to netdev that is going to another net namespace.
+ */
+ __devlink_port_type_set(devlink_port, devlink_port->type,
+ NULL);
+ break;
+ case NETDEV_PRE_UNINIT:
+ /* Clear the type and the netdev pointer. Happens once during
+ * netdevice unregister.
+ */
+ __devlink_port_type_set(devlink_port, DEVLINK_PORT_TYPE_NOTSET,
+ NULL);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
static int __devlink_port_attrs_set(struct devlink_port *devlink_port,
enum devlink_port_flavour flavour)
{
@@ -11624,6 +11734,8 @@ static const struct devlink_trap devlink_trap_generic[] = {
DEVLINK_TRAP(ESP_PARSING, DROP),
DEVLINK_TRAP(BLACKHOLE_NEXTHOP, DROP),
DEVLINK_TRAP(DMAC_FILTER, DROP),
+ DEVLINK_TRAP(EAPOL, CONTROL),
+ DEVLINK_TRAP(LOCKED_PORT, DROP),
};
#define DEVLINK_TRAP_GROUP(_id) \
@@ -11659,6 +11771,7 @@ static const struct devlink_trap_group devlink_trap_group_generic[] = {
DEVLINK_TRAP_GROUP(ACL_SAMPLE),
DEVLINK_TRAP_GROUP(ACL_TRAP),
DEVLINK_TRAP_GROUP(PARSER_ERROR_DROPS),
+ DEVLINK_TRAP_GROUP(EAPOL),
};
static int devlink_trap_generic_verify(const struct devlink_trap *trap)
@@ -12016,7 +12129,7 @@ devlink_trap_report_metadata_set(struct devlink_trap_metadata *metadata,
spin_lock(&in_devlink_port->type_lock);
if (in_devlink_port->type == DEVLINK_PORT_TYPE_ETH)
- metadata->input_dev = in_devlink_port->type_dev;
+ metadata->input_dev = in_devlink_port->type_eth.netdev;
spin_unlock(&in_devlink_port->type_lock);
}
@@ -12416,14 +12529,6 @@ free_msg:
nlmsg_free(msg);
}
-static struct devlink_port *netdev_to_devlink_port(struct net_device *dev)
-{
- if (!dev->netdev_ops->ndo_get_devlink_port)
- return NULL;
-
- return dev->netdev_ops->ndo_get_devlink_port(dev);
-}
-
void devlink_compat_running_version(struct devlink *devlink,
char *buf, size_t len)
{
@@ -12469,7 +12574,7 @@ int devlink_compat_phys_port_name_get(struct net_device *dev,
*/
ASSERT_RTNL();
- devlink_port = netdev_to_devlink_port(dev);
+ devlink_port = dev->devlink_port;
if (!devlink_port)
return -EOPNOTSUPP;
@@ -12485,7 +12590,7 @@ int devlink_compat_switch_id_get(struct net_device *dev,
* devlink_port instance cannot disappear in the middle. No need to take
* any devlink lock as only permanent values are accessed.
*/
- devlink_port = netdev_to_devlink_port(dev);
+ devlink_port = dev->devlink_port;
if (!devlink_port || !devlink_port->switch_port)
return -EOPNOTSUPP;
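
The devlink port/netdev coupling is inverted here: instead of drivers pushing the netdev into devlink via devlink_port_type_eth_set(port, netdev), devlink registers a per-namespace netdevice notifier and reacts to NETDEV_POST_INIT / NETDEV_REGISTER / NETDEV_UNREGISTER / NETDEV_PRE_UNINIT for any netdev whose devlink_port pointer refers to one of its ports. A hedged sketch of the driver side under that model (the exact helper a driver uses to make the association is outside this hunk; a plain assignment is shown only for illustration):

	/* Associate the objects before registration; the notifier above then
	 * maintains devlink_port->type and type_eth.netdev on our behalf.
	 */
	netdev->devlink_port = devlink_port;
	err = register_netdev(netdev);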
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index f084a4a6b7ab..5a782d1d8fd3 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -1432,9 +1432,9 @@ static void net_dm_stats_read(struct net_dm_stats *stats)
u64 dropped;
do {
- start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
dropped = u64_stats_read(&cpu_stats->dropped);
- } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
u64_stats_add(&stats->dropped, dropped);
}
@@ -1476,9 +1476,9 @@ static void net_dm_hw_stats_read(struct net_dm_stats *stats)
u64 dropped;
do {
- start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
dropped = u64_stats_read(&cpu_stats->dropped);
- } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
u64_stats_add(&stats->dropped, dropped);
}
@@ -1620,7 +1620,7 @@ static const struct genl_small_ops dropmon_ops[] = {
},
};
-static int net_dm_nl_pre_doit(const struct genl_ops *ops,
+static int net_dm_nl_pre_doit(const struct genl_split_ops *ops,
struct sk_buff *skb, struct genl_info *info)
{
mutex_lock(&net_dm_mutex);
@@ -1628,7 +1628,7 @@ static int net_dm_nl_pre_doit(const struct genl_ops *ops,
return 0;
}
-static void net_dm_nl_post_doit(const struct genl_ops *ops,
+static void net_dm_nl_post_doit(const struct genl_split_ops *ops,
struct sk_buff *skb, struct genl_info *info)
{
mutex_unlock(&net_dm_mutex);
diff --git a/net/core/failover.c b/net/core/failover.c
index 864d2d83eff4..655411c4ca51 100644
--- a/net/core/failover.c
+++ b/net/core/failover.c
@@ -80,14 +80,14 @@ static int failover_slave_register(struct net_device *slave_dev)
goto err_upper_link;
}
- slave_dev->priv_flags |= (IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
+ slave_dev->priv_flags |= IFF_FAILOVER_SLAVE;
if (fops && fops->slave_register &&
!fops->slave_register(slave_dev, failover_dev))
return NOTIFY_OK;
netdev_upper_dev_unlink(slave_dev, failover_dev);
- slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
+ slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE;
err_upper_link:
netdev_rx_handler_unregister(slave_dev);
done:
@@ -121,7 +121,7 @@ int failover_slave_unregister(struct net_device *slave_dev)
netdev_rx_handler_unregister(slave_dev);
netdev_upper_dev_unlink(slave_dev, failover_dev);
- slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
+ slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE;
if (fops && fops->slave_unregister &&
!fops->slave_unregister(slave_dev, failover_dev))
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index c8d137ef5980..b71ccaec0991 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -135,10 +135,10 @@ static void gnet_stats_add_basic_cpu(struct gnet_stats_basic_sync *bstats,
u64 bytes, packets;
do {
- start = u64_stats_fetch_begin_irq(&bcpu->syncp);
+ start = u64_stats_fetch_begin(&bcpu->syncp);
bytes = u64_stats_read(&bcpu->bytes);
packets = u64_stats_read(&bcpu->packets);
- } while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));
+ } while (u64_stats_fetch_retry(&bcpu->syncp, start));
t_bytes += bytes;
t_packets += packets;
@@ -162,10 +162,10 @@ void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats,
}
do {
if (running)
- start = u64_stats_fetch_begin_irq(&b->syncp);
+ start = u64_stats_fetch_begin(&b->syncp);
bytes = u64_stats_read(&b->bytes);
packets = u64_stats_read(&b->packets);
- } while (running && u64_stats_fetch_retry_irq(&b->syncp, start));
+ } while (running && u64_stats_fetch_retry(&b->syncp, start));
_bstats_update(bstats, bytes, packets);
}
@@ -187,10 +187,10 @@ static void gnet_stats_read_basic(u64 *ret_bytes, u64 *ret_packets,
u64 bytes, packets;
do {
- start = u64_stats_fetch_begin_irq(&bcpu->syncp);
+ start = u64_stats_fetch_begin(&bcpu->syncp);
bytes = u64_stats_read(&bcpu->bytes);
packets = u64_stats_read(&bcpu->packets);
- } while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));
+ } while (u64_stats_fetch_retry(&bcpu->syncp, start));
t_bytes += bytes;
t_packets += packets;
@@ -201,10 +201,10 @@ static void gnet_stats_read_basic(u64 *ret_bytes, u64 *ret_packets,
}
do {
if (running)
- start = u64_stats_fetch_begin_irq(&b->syncp);
+ start = u64_stats_fetch_begin(&b->syncp);
*ret_bytes = u64_stats_read(&b->bytes);
*ret_packets = u64_stats_read(&b->packets);
- } while (running && u64_stats_fetch_retry_irq(&b->syncp, start));
+ } while (running && u64_stats_fetch_retry(&b->syncp, start));
}
static int
diff --git a/net/core/gro.c b/net/core/gro.c
index bc9451743307..8e0fe85a647d 100644
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -489,45 +489,45 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
rcu_read_lock();
list_for_each_entry_rcu(ptype, head, list) {
- if (ptype->type != type || !ptype->callbacks.gro_receive)
- continue;
-
- skb_set_network_header(skb, skb_gro_offset(skb));
- skb_reset_mac_len(skb);
- BUILD_BUG_ON(sizeof_field(struct napi_gro_cb, zeroed) != sizeof(u32));
- BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
- sizeof(u32))); /* Avoid slow unaligned acc */
- *(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
- NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
- NAPI_GRO_CB(skb)->is_atomic = 1;
- NAPI_GRO_CB(skb)->count = 1;
- if (unlikely(skb_is_gso(skb))) {
- NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
- /* Only support TCP at the moment. */
- if (!skb_is_gso_tcp(skb))
- NAPI_GRO_CB(skb)->flush = 1;
- }
-
- /* Setup for GRO checksum validation */
- switch (skb->ip_summed) {
- case CHECKSUM_COMPLETE:
- NAPI_GRO_CB(skb)->csum = skb->csum;
- NAPI_GRO_CB(skb)->csum_valid = 1;
- break;
- case CHECKSUM_UNNECESSARY:
- NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
- break;
- }
+ if (ptype->type == type && ptype->callbacks.gro_receive)
+ goto found_ptype;
+ }
+ rcu_read_unlock();
+ goto normal;
+
+found_ptype:
+ skb_set_network_header(skb, skb_gro_offset(skb));
+ skb_reset_mac_len(skb);
+ BUILD_BUG_ON(sizeof_field(struct napi_gro_cb, zeroed) != sizeof(u32));
+ BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
+ sizeof(u32))); /* Avoid slow unaligned acc */
+ *(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
+ NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
+ NAPI_GRO_CB(skb)->is_atomic = 1;
+ NAPI_GRO_CB(skb)->count = 1;
+ if (unlikely(skb_is_gso(skb))) {
+ NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
+ /* Only support TCP at the moment. */
+ if (!skb_is_gso_tcp(skb))
+ NAPI_GRO_CB(skb)->flush = 1;
+ }
- pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
- ipv6_gro_receive, inet_gro_receive,
- &gro_list->list, skb);
+ /* Setup for GRO checksum validation */
+ switch (skb->ip_summed) {
+ case CHECKSUM_COMPLETE:
+ NAPI_GRO_CB(skb)->csum = skb->csum;
+ NAPI_GRO_CB(skb)->csum_valid = 1;
+ break;
+ case CHECKSUM_UNNECESSARY:
+ NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
break;
}
- rcu_read_unlock();
- if (&ptype->list == head)
- goto normal;
+ pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
+ ipv6_gro_receive, inet_gro_receive,
+ &gro_list->list, skb);
+
+ rcu_read_unlock();
if (PTR_ERR(pp) == -EINPROGRESS) {
ret = GRO_CONSUMED;
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 8409d41405df..679b84cc8794 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -532,7 +532,7 @@ static ssize_t phys_port_name_show(struct device *dev,
* returning early without hitting the trylock/restart below.
*/
if (!netdev->netdev_ops->ndo_get_phys_port_name &&
- !netdev->netdev_ops->ndo_get_devlink_port)
+ !netdev->devlink_port)
return -EOPNOTSUPP;
if (!rtnl_trylock())
@@ -562,7 +562,7 @@ static ssize_t phys_switch_id_show(struct device *dev,
* because recurse is false when calling dev_get_port_parent_id.
*/
if (!netdev->netdev_ops->ndo_get_port_parent_id &&
- !netdev->netdev_ops->ndo_get_devlink_port)
+ !netdev->devlink_port)
return -EOPNOTSUPP;
if (!rtnl_trylock())
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index f64654df71a2..5581d22cc191 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -316,6 +316,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
refcount_set(&net->ns.count, 1);
ref_tracker_dir_init(&net->refcnt_tracker, 128);
+ ref_tracker_dir_init(&net->notrefcnt_tracker, 128);
refcount_set(&net->passive, 1);
get_random_bytes(&net->hash_mix, sizeof(u32));
@@ -436,6 +437,10 @@ static void net_free(struct net *net)
{
if (refcount_dec_and_test(&net->passive)) {
kfree(rcu_access_pointer(net->gen));
+
+ /* There should not be any trackers left there. */
+ ref_tracker_dir_exit(&net->notrefcnt_tracker);
+
kmem_cache_free(net_cachep, net);
}
}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 74864dc46a7e..64289bc98887 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -53,6 +53,7 @@
#include <net/fib_rules.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
+#include <net/devlink.h>
#include "dev.h"
@@ -760,7 +761,7 @@ int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
EXPORT_SYMBOL(rtnl_unicast);
void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
- struct nlmsghdr *nlh, gfp_t flags)
+ const struct nlmsghdr *nlh, gfp_t flags)
{
struct sock *rtnl = net->rtnl;
@@ -1038,6 +1039,16 @@ static size_t rtnl_proto_down_size(const struct net_device *dev)
return size;
}
+static size_t rtnl_devlink_port_size(const struct net_device *dev)
+{
+ size_t size = nla_total_size(0); /* nest IFLA_DEVLINK_PORT */
+
+ if (dev->devlink_port)
+ size += devlink_nl_port_handle_size(dev->devlink_port);
+
+ return size;
+}
+
static noinline size_t if_nlmsg_size(const struct net_device *dev,
u32 ext_filter_mask)
{
@@ -1091,6 +1102,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ nla_total_size(4) /* IFLA_MAX_MTU */
+ rtnl_prop_list_size(dev)
+ nla_total_size(MAX_ADDR_LEN) /* IFLA_PERM_ADDRESS */
+ + rtnl_devlink_port_size(dev)
+ 0;
}
@@ -1728,6 +1740,30 @@ nla_put_failure:
return -EMSGSIZE;
}
+static int rtnl_fill_devlink_port(struct sk_buff *skb,
+ const struct net_device *dev)
+{
+ struct nlattr *devlink_port_nest;
+ int ret;
+
+ devlink_port_nest = nla_nest_start(skb, IFLA_DEVLINK_PORT);
+ if (!devlink_port_nest)
+ return -EMSGSIZE;
+
+ if (dev->devlink_port) {
+ ret = devlink_nl_port_handle_fill(skb, dev->devlink_port);
+ if (ret < 0)
+ goto nest_cancel;
+ }
+
+ nla_nest_end(skb, devlink_port_nest);
+ return 0;
+
+nest_cancel:
+ nla_nest_cancel(skb, devlink_port_nest);
+ return ret;
+}
+
static int rtnl_fill_ifinfo(struct sk_buff *skb,
struct net_device *dev, struct net *src_net,
int type, u32 pid, u32 seq, u32 change,
@@ -1865,6 +1901,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
dev->dev.parent->bus->name))
goto nla_put_failure;
+ if (rtnl_fill_devlink_port(skb, dev))
+ goto nla_put_failure;
+
nlmsg_end(skb, nlh);
return 0;
@@ -3110,7 +3149,7 @@ static int rtnl_group_dellink(const struct net *net, int group)
return 0;
}
-int rtnl_delete_link(struct net_device *dev)
+int rtnl_delete_link(struct net_device *dev, u32 portid, const struct nlmsghdr *nlh)
{
const struct rtnl_link_ops *ops;
LIST_HEAD(list_kill);
@@ -3120,7 +3159,7 @@ int rtnl_delete_link(struct net_device *dev)
return -EOPNOTSUPP;
ops->dellink(dev, &list_kill);
- unregister_netdevice_many(&list_kill);
+ unregister_netdevice_many_notify(&list_kill, portid, nlh);
return 0;
}
@@ -3130,6 +3169,7 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
+ u32 portid = NETLINK_CB(skb).portid;
struct net *tgt_net = net;
struct net_device *dev = NULL;
struct ifinfomsg *ifm;
@@ -3171,7 +3211,7 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
goto out;
}
- err = rtnl_delete_link(dev);
+ err = rtnl_delete_link(dev, portid, nlh);
out:
if (netnsid >= 0)
@@ -3180,7 +3220,8 @@ out:
return err;
}
-int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
+int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm,
+ u32 portid, const struct nlmsghdr *nlh)
{
unsigned int old_flags;
int err;
@@ -3194,10 +3235,10 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
}
if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
- __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags));
+ __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags), portid, nlh);
} else {
dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
- __dev_notify_flags(dev, old_flags, ~0U);
+ __dev_notify_flags(dev, old_flags, ~0U, portid, nlh);
}
return 0;
}
@@ -3311,11 +3352,13 @@ static int rtnl_group_changelink(const struct sk_buff *skb,
static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm,
const struct rtnl_link_ops *ops,
+ const struct nlmsghdr *nlh,
struct nlattr **tb, struct nlattr **data,
struct netlink_ext_ack *extack)
{
unsigned char name_assign_type = NET_NAME_USER;
struct net *net = sock_net(skb->sk);
+ u32 portid = NETLINK_CB(skb).portid;
struct net *dest_net, *link_net;
struct net_device *dev;
char ifname[IFNAMSIZ];
@@ -3369,7 +3412,7 @@ static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm,
goto out;
}
- err = rtnl_configure_link(dev, ifm);
+ err = rtnl_configure_link(dev, ifm, portid, nlh);
if (err < 0)
goto out_unregister;
if (link_net) {
@@ -3578,7 +3621,7 @@ replay:
return -EOPNOTSUPP;
}
- return rtnl_newlink_create(skb, ifm, ops, tb, data, extack);
+ return rtnl_newlink_create(skb, ifm, ops, nlh, tb, data, extack);
}
static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -3896,7 +3939,7 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
unsigned int change,
u32 event, gfp_t flags, int *new_nsid,
- int new_ifindex)
+ int new_ifindex, u32 portid, u32 seq)
{
struct net *net = dev_net(dev);
struct sk_buff *skb;
@@ -3907,7 +3950,7 @@ struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
goto errout;
err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
- type, 0, 0, change, 0, 0, event,
+ type, portid, seq, change, 0, 0, event,
new_nsid, new_ifindex, -1, flags);
if (err < 0) {
/* -EMSGSIZE implies BUG in if_nlmsg_size() */
@@ -3922,16 +3965,18 @@ errout:
return NULL;
}
-void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags)
+void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags,
+ u32 portid, const struct nlmsghdr *nlh)
{
struct net *net = dev_net(dev);
- rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags);
+ rtnl_notify(skb, net, portid, RTNLGRP_LINK, nlh, flags);
}
static void rtmsg_ifinfo_event(int type, struct net_device *dev,
unsigned int change, u32 event,
- gfp_t flags, int *new_nsid, int new_ifindex)
+ gfp_t flags, int *new_nsid, int new_ifindex,
+ u32 portid, const struct nlmsghdr *nlh)
{
struct sk_buff *skb;
@@ -3939,23 +3984,23 @@ static void rtmsg_ifinfo_event(int type, struct net_device *dev,
return;
skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid,
- new_ifindex);
+ new_ifindex, portid, nlmsg_seq(nlh));
if (skb)
- rtmsg_ifinfo_send(skb, dev, flags);
+ rtmsg_ifinfo_send(skb, dev, flags, portid, nlh);
}
void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
- gfp_t flags)
+ gfp_t flags, u32 portid, const struct nlmsghdr *nlh)
{
rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
- NULL, 0);
+ NULL, 0, portid, nlh);
}
void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
gfp_t flags, int *new_nsid, int new_ifindex)
{
rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
- new_nsid, new_ifindex);
+ new_nsid, new_ifindex, 0, NULL);
}
static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
@@ -4045,6 +4090,11 @@ int ndo_dflt_fdb_add(struct ndmsg *ndm,
return err;
}
+ if (tb[NDA_FLAGS_EXT]) {
+ netdev_info(dev, "invalid flags given to default FDB implementation\n");
+ return err;
+ }
+
if (vid) {
netdev_info(dev, "vlans aren't supported yet for dev_uc|mc_add()\n");
return err;
@@ -6140,7 +6190,7 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
case NETDEV_CHANGELOWERSTATE:
case NETDEV_CHANGE_TX_QUEUE_LEN:
rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
- GFP_KERNEL, NULL, 0);
+ GFP_KERNEL, NULL, 0, 0, NULL);
break;
default:
break;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 88fa40571d0c..90d085290d49 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -94,6 +94,7 @@ EXPORT_SYMBOL(sysctl_max_skb_frags);
#undef FN
#define FN(reason) [SKB_DROP_REASON_##reason] = #reason,
const char * const drop_reasons[] = {
+ [SKB_CONSUMED] = "CONSUMED",
DEFINE_DROP_REASON(FN, FN)
};
EXPORT_SYMBOL(drop_reasons);
@@ -506,14 +507,14 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
*/
size = SKB_DATA_ALIGN(size);
size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
+ osize = kmalloc_size_roundup(size);
+ data = kmalloc_reserve(osize, gfp_mask, node, &pfmemalloc);
if (unlikely(!data))
goto nodata;
- /* kmalloc(size) might give us more room than requested.
+ /* kmalloc_size_roundup() might give us more room than requested.
* Put skb_shared_info exactly at the end of allocated zone,
* to allow max possible filling before reallocation.
*/
- osize = ksize(data);
size = SKB_WITH_OVERHEAD(osize);
prefetchw(data + size);
@@ -748,6 +749,13 @@ static void skb_clone_fraglist(struct sk_buff *skb)
skb_get(list);
}
+static bool skb_pp_recycle(struct sk_buff *skb, void *data)
+{
+ if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
+ return false;
+ return page_pool_return_skb_page(virt_to_page(data));
+}
+
static void skb_free_head(struct sk_buff *skb)
{
unsigned char *head = skb->head;
@@ -761,7 +769,7 @@ static void skb_free_head(struct sk_buff *skb)
}
}
-static void skb_release_data(struct sk_buff *skb)
+static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
int i;
@@ -784,7 +792,7 @@ static void skb_release_data(struct sk_buff *skb)
free_head:
if (shinfo->frag_list)
- kfree_skb_list(shinfo->frag_list);
+ kfree_skb_list_reason(shinfo->frag_list, reason);
skb_free_head(skb);
exit:
@@ -847,11 +855,11 @@ void skb_release_head_state(struct sk_buff *skb)
}
/* Free everything but the sk_buff shell. */
-static void skb_release_all(struct sk_buff *skb)
+static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason)
{
skb_release_head_state(skb);
if (likely(skb->head))
- skb_release_data(skb);
+ skb_release_data(skb, reason);
}
/**
@@ -865,7 +873,7 @@ static void skb_release_all(struct sk_buff *skb)
void __kfree_skb(struct sk_buff *skb)
{
- skb_release_all(skb);
+ skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED);
kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);
@@ -887,7 +895,10 @@ kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason)
DEBUG_NET_WARN_ON_ONCE(reason <= 0 || reason >= SKB_DROP_REASON_MAX);
- trace_kfree_skb(skb, __builtin_return_address(0), reason);
+ if (reason == SKB_CONSUMED)
+ trace_consume_skb(skb);
+ else
+ trace_kfree_skb(skb, __builtin_return_address(0), reason);
__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb_reason);
@@ -1045,7 +1056,7 @@ EXPORT_SYMBOL(consume_skb);
void __consume_stateless_skb(struct sk_buff *skb)
{
trace_consume_skb(skb);
- skb_release_data(skb);
+ skb_release_data(skb, SKB_CONSUMED);
kfree_skbmem(skb);
}
@@ -1070,7 +1081,7 @@ static void napi_skb_cache_put(struct sk_buff *skb)
void __kfree_skb_defer(struct sk_buff *skb)
{
- skb_release_all(skb);
+ skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED);
napi_skb_cache_put(skb);
}
@@ -1108,7 +1119,7 @@ void napi_consume_skb(struct sk_buff *skb, int budget)
return;
}
- skb_release_all(skb);
+ skb_release_all(skb, SKB_CONSUMED);
napi_skb_cache_put(skb);
}
EXPORT_SYMBOL(napi_consume_skb);
@@ -1239,7 +1250,7 @@ EXPORT_SYMBOL_GPL(alloc_skb_for_msg);
*/
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
- skb_release_all(dst);
+ skb_release_all(dst, SKB_CONSUMED);
return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);
@@ -1814,10 +1825,11 @@ EXPORT_SYMBOL(__pskb_copy_fclone);
int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
gfp_t gfp_mask)
{
- int i, osize = skb_end_offset(skb);
- int size = osize + nhead + ntail;
+ unsigned int osize = skb_end_offset(skb);
+ unsigned int size = osize + nhead + ntail;
long off;
u8 *data;
+ int i;
BUG_ON(nhead < 0);
@@ -1825,15 +1837,16 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
skb_zcopy_downgrade_managed(skb);
- size = SKB_DATA_ALIGN(size);
-
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
- data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
- gfp_mask, NUMA_NO_NODE, NULL);
+
+ size = SKB_DATA_ALIGN(size);
+ size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ size = kmalloc_size_roundup(size);
+ data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
if (!data)
goto nodata;
- size = SKB_WITH_OVERHEAD(ksize(data));
+ size = SKB_WITH_OVERHEAD(size);
/* Copy only real data... and, alas, header. This should be
* optimized for the cases when header is void.
@@ -1860,7 +1873,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
if (skb_has_frag_list(skb))
skb_clone_fraglist(skb);
- skb_release_data(skb);
+ skb_release_data(skb, SKB_CONSUMED);
} else {
skb_free_head(skb);
}
@@ -6169,21 +6182,20 @@ static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
const int headlen, gfp_t gfp_mask)
{
int i;
- int size = skb_end_offset(skb);
+ unsigned int size = skb_end_offset(skb);
int new_hlen = headlen - off;
u8 *data;
- size = SKB_DATA_ALIGN(size);
-
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
- data = kmalloc_reserve(size +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
- gfp_mask, NUMA_NO_NODE, NULL);
+
+ size = SKB_DATA_ALIGN(size);
+ size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ size = kmalloc_size_roundup(size);
+ data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
if (!data)
return -ENOMEM;
-
- size = SKB_WITH_OVERHEAD(ksize(data));
+ size = SKB_WITH_OVERHEAD(size);
/* Copy real data, and all frags */
skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
@@ -6203,7 +6215,7 @@ static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
skb_frag_ref(skb, i);
if (skb_has_frag_list(skb))
skb_clone_fraglist(skb);
- skb_release_data(skb);
+ skb_release_data(skb, SKB_CONSUMED);
} else {
		/* we can reuse the existing refcount - all we did was
		 * relocate values
@@ -6288,22 +6300,21 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
int pos, gfp_t gfp_mask)
{
int i, k = 0;
- int size = skb_end_offset(skb);
+ unsigned int size = skb_end_offset(skb);
u8 *data;
const int nfrags = skb_shinfo(skb)->nr_frags;
struct skb_shared_info *shinfo;
- size = SKB_DATA_ALIGN(size);
-
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
- data = kmalloc_reserve(size +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
- gfp_mask, NUMA_NO_NODE, NULL);
+
+ size = SKB_DATA_ALIGN(size);
+ size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ size = kmalloc_size_roundup(size);
+ data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
if (!data)
return -ENOMEM;
-
- size = SKB_WITH_OVERHEAD(ksize(data));
+ size = SKB_WITH_OVERHEAD(size);
memcpy((struct skb_shared_info *)(data + size),
skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0]));
@@ -6347,7 +6358,7 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
kfree(data);
return -ENOMEM;
}
- skb_release_data(skb);
+ skb_release_data(skb, SKB_CONSUMED);
skb->head = data;
skb->head_frag = 0;
@@ -6426,6 +6437,7 @@ void skb_condense(struct sk_buff *skb)
*/
skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
}
+EXPORT_SYMBOL(skb_condense);
#ifdef CONFIG_SKB_EXTENSIONS
static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
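The skbuff.c hunks above stop reading the allocation size back with ksize() and instead round the request up front with kmalloc_size_roundup(), so the skb head is sized from a value known before the allocation. A condensed user-space sketch of that sizing sequence, with stand-ins for the kernel macros and allocator (the ~320-byte shared-info size and power-of-two buckets are assumptions for illustration only):

#include <stdlib.h>

#define CACHE_ALIGN(x)	(((x) + 63UL) & ~63UL)	/* stand-in for SKB_DATA_ALIGN() */

/* Stand-in for kmalloc_size_roundup(): round a request up to the size the
 * allocator would actually hand back (real slab buckets are not strictly
 * powers of two; this is illustrative).
 */
static size_t size_roundup(size_t size)
{
	size_t bucket = 32;

	while (bucket < size)
		bucket <<= 1;
	return bucket;
}

/* Sizing sketch: a 1500-byte request becomes 1536 after cache alignment,
 * ~1856 once a ~320-byte shared-info area is added, and 2048 after the
 * bucket roundup; the usable area is then 2048 minus the shared-info area
 * (the SKB_WITH_OVERHEAD() step in the hunks above).
 */
static void *alloc_head(size_t request, size_t shinfo_size, size_t *usable)
{
	size_t size = CACHE_ALIGN(request) + CACHE_ALIGN(shinfo_size);
	void *data;

	size = size_roundup(size);
	data = malloc(size);		/* kmalloc_reserve() in the real code */
	if (!data)
		return NULL;
	*usable = size - CACHE_ALIGN(shinfo_size);
	return data;
}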
diff --git a/net/core/sock.c b/net/core/sock.c
index a3ba0358c77c..4571914a4aa8 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1436,7 +1436,7 @@ set_sndbuf:
break;
}
case SO_INCOMING_CPU:
- WRITE_ONCE(sk->sk_incoming_cpu, val);
+ reuseport_update_incoming_cpu(sk, val);
break;
case SO_CNX_ADVICE:
@@ -2094,6 +2094,9 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
if (likely(sk->sk_net_refcnt)) {
get_net_track(net, &sk->ns_tracker, priority);
sock_inuse_add(net, 1);
+ } else {
+ __netns_tracker_alloc(net, &sk->ns_tracker,
+ false, priority);
}
sock_net_set(sk, net);
@@ -2149,6 +2152,9 @@ static void __sk_destruct(struct rcu_head *head)
if (likely(sk->sk_net_refcnt))
put_net_track(sock_net(sk), &sk->ns_tracker);
+ else
+ __netns_tracker_free(sock_net(sk), &sk->ns_tracker, false);
+
sk_prot_free(sk->sk_prot_creator, sk);
}
@@ -2237,6 +2243,14 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
if (likely(newsk->sk_net_refcnt)) {
get_net_track(sock_net(newsk), &newsk->ns_tracker, priority);
sock_inuse_add(sock_net(newsk), 1);
+ } else {
+ /* Kernel sockets are not elevating the struct net refcount.
+ * Instead, use a tracker to more easily detect if a layer
+ * is not properly dismantling its kernel sockets at netns
+ * destroy time.
+ */
+ __netns_tracker_alloc(sock_net(newsk), &newsk->ns_tracker,
+ false, priority);
}
sk_node_init(&newsk->sk_node);
sock_lock_init(newsk);
@@ -2730,7 +2744,7 @@ failure:
}
EXPORT_SYMBOL(sock_alloc_send_pskb);
-int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
+int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
struct sockcm_cookie *sockc)
{
u32 tsflags;
@@ -2784,7 +2798,7 @@ int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
return -EINVAL;
if (cmsg->cmsg_level != SOL_SOCKET)
continue;
- ret = __sock_cmsg_send(sk, msg, cmsg, sockc);
+ ret = __sock_cmsg_send(sk, cmsg, sockc);
if (ret)
return ret;
}
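The sock.c hunks above pair every kernel socket (which does not hold a struct net reference) with an entry in the new notrefcnt tracker, so a layer that forgets to dismantle its kernel sockets shows up when the namespace is destroyed. A minimal sketch of the underlying ref_tracker API that the __netns_tracker_alloc()/__netns_tracker_free() calls above build on (the quarantine depth of 16 is just an example value):

#include <linux/gfp.h>
#include <linux/ref_tracker.h>

static struct ref_tracker_dir example_dir;

static void ref_tracker_example(void)
{
	struct ref_tracker *tracker = NULL;

	ref_tracker_dir_init(&example_dir, 16);		/* keep up to 16 freed entries for debugging */
	ref_tracker_alloc(&example_dir, &tracker, GFP_KERNEL);	/* record who took the reference */
	ref_tracker_free(&example_dir, &tracker);	/* and who dropped it */
	ref_tracker_dir_exit(&example_dir);		/* warns if any tracker is still outstanding */
}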
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index fb90e1e00773..5a165286e4d8 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -37,6 +37,70 @@ void reuseport_has_conns_set(struct sock *sk)
}
EXPORT_SYMBOL(reuseport_has_conns_set);
+static void __reuseport_get_incoming_cpu(struct sock_reuseport *reuse)
+{
+ /* Paired with READ_ONCE() in reuseport_select_sock_by_hash(). */
+ WRITE_ONCE(reuse->incoming_cpu, reuse->incoming_cpu + 1);
+}
+
+static void __reuseport_put_incoming_cpu(struct sock_reuseport *reuse)
+{
+ /* Paired with READ_ONCE() in reuseport_select_sock_by_hash(). */
+ WRITE_ONCE(reuse->incoming_cpu, reuse->incoming_cpu - 1);
+}
+
+static void reuseport_get_incoming_cpu(struct sock *sk, struct sock_reuseport *reuse)
+{
+ if (sk->sk_incoming_cpu >= 0)
+ __reuseport_get_incoming_cpu(reuse);
+}
+
+static void reuseport_put_incoming_cpu(struct sock *sk, struct sock_reuseport *reuse)
+{
+ if (sk->sk_incoming_cpu >= 0)
+ __reuseport_put_incoming_cpu(reuse);
+}
+
+void reuseport_update_incoming_cpu(struct sock *sk, int val)
+{
+ struct sock_reuseport *reuse;
+ int old_sk_incoming_cpu;
+
+ if (unlikely(!rcu_access_pointer(sk->sk_reuseport_cb))) {
+		/* Paired with READ_ONCE() in sk_incoming_cpu_update()
+ * and compute_score().
+ */
+ WRITE_ONCE(sk->sk_incoming_cpu, val);
+ return;
+ }
+
+ spin_lock_bh(&reuseport_lock);
+
+ /* This must be done under reuseport_lock to avoid a race with
+ * reuseport_grow(), which accesses sk->sk_incoming_cpu without
+ * lock_sock() when detaching a shutdown()ed sk.
+ *
+ * Paired with READ_ONCE() in reuseport_select_sock_by_hash().
+ */
+ old_sk_incoming_cpu = sk->sk_incoming_cpu;
+ WRITE_ONCE(sk->sk_incoming_cpu, val);
+
+ reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
+ lockdep_is_held(&reuseport_lock));
+
+ /* reuseport_grow() has detached a closed sk. */
+ if (!reuse)
+ goto out;
+
+ if (old_sk_incoming_cpu < 0 && val >= 0)
+ __reuseport_get_incoming_cpu(reuse);
+ else if (old_sk_incoming_cpu >= 0 && val < 0)
+ __reuseport_put_incoming_cpu(reuse);
+
+out:
+ spin_unlock_bh(&reuseport_lock);
+}
+
static int reuseport_sock_index(struct sock *sk,
const struct sock_reuseport *reuse,
bool closed)
@@ -64,6 +128,7 @@ static void __reuseport_add_sock(struct sock *sk,
/* paired with smp_rmb() in reuseport_(select|migrate)_sock() */
smp_wmb();
reuse->num_socks++;
+ reuseport_get_incoming_cpu(sk, reuse);
}
static bool __reuseport_detach_sock(struct sock *sk,
@@ -76,6 +141,7 @@ static bool __reuseport_detach_sock(struct sock *sk,
reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
reuse->num_socks--;
+ reuseport_put_incoming_cpu(sk, reuse);
return true;
}
@@ -86,6 +152,7 @@ static void __reuseport_add_closed_sock(struct sock *sk,
reuse->socks[reuse->max_socks - reuse->num_closed_socks - 1] = sk;
/* paired with READ_ONCE() in inet_csk_bind_conflict() */
WRITE_ONCE(reuse->num_closed_socks, reuse->num_closed_socks + 1);
+ reuseport_get_incoming_cpu(sk, reuse);
}
static bool __reuseport_detach_closed_sock(struct sock *sk,
@@ -99,6 +166,7 @@ static bool __reuseport_detach_closed_sock(struct sock *sk,
reuse->socks[i] = reuse->socks[reuse->max_socks - reuse->num_closed_socks];
/* paired with READ_ONCE() in inet_csk_bind_conflict() */
WRITE_ONCE(reuse->num_closed_socks, reuse->num_closed_socks - 1);
+ reuseport_put_incoming_cpu(sk, reuse);
return true;
}
@@ -166,6 +234,7 @@ int reuseport_alloc(struct sock *sk, bool bind_inany)
reuse->bind_inany = bind_inany;
reuse->socks[0] = sk;
reuse->num_socks = 1;
+ reuseport_get_incoming_cpu(sk, reuse);
rcu_assign_pointer(sk->sk_reuseport_cb, reuse);
out:
@@ -209,6 +278,7 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
more_reuse->reuseport_id = reuse->reuseport_id;
more_reuse->bind_inany = reuse->bind_inany;
more_reuse->has_conns = reuse->has_conns;
+ more_reuse->incoming_cpu = reuse->incoming_cpu;
memcpy(more_reuse->socks, reuse->socks,
reuse->num_socks * sizeof(struct sock *));
@@ -458,18 +528,32 @@ static struct sock *run_bpf_filter(struct sock_reuseport *reuse, u16 socks,
static struct sock *reuseport_select_sock_by_hash(struct sock_reuseport *reuse,
u32 hash, u16 num_socks)
{
+ struct sock *first_valid_sk = NULL;
int i, j;
i = j = reciprocal_scale(hash, num_socks);
- while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) {
+ do {
+ struct sock *sk = reuse->socks[i];
+
+ if (sk->sk_state != TCP_ESTABLISHED) {
+ /* Paired with WRITE_ONCE() in __reuseport_(get|put)_incoming_cpu(). */
+ if (!READ_ONCE(reuse->incoming_cpu))
+ return sk;
+
+ /* Paired with WRITE_ONCE() in reuseport_update_incoming_cpu(). */
+ if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
+ return sk;
+
+ if (!first_valid_sk)
+ first_valid_sk = sk;
+ }
+
i++;
if (i >= num_socks)
i = 0;
- if (i == j)
- return NULL;
- }
+ } while (i != j);
- return reuse->socks[i];
+ return first_valid_sk;
}
/**
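With the selection change above, a reuseport group can be steered per CPU: if any member has set SO_INCOMING_CPU, the hash lookup first tries a socket whose stored CPU matches the CPU handling the packet and only then falls back to the first non-established candidate. A user-space sketch of how such a group is typically built, one listener per worker (error handling omitted):

#include <stdint.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

static int make_worker_socket(int cpu, uint16_t port)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(port),
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
	setsockopt(fd, SOL_SOCKET, SO_INCOMING_CPU, &cpu, sizeof(cpu));
	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	listen(fd, 128);
	return fd;
}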
diff --git a/net/core/utils.c b/net/core/utils.c
index 938495bc1d34..c994e95172ac 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -302,7 +302,7 @@ static int inet4_pton(const char *src, u16 port_num,
struct sockaddr_storage *addr)
{
struct sockaddr_in *addr4 = (struct sockaddr_in *)addr;
- int srclen = strlen(src);
+ size_t srclen = strlen(src);
if (srclen > INET_ADDRSTRLEN)
return -EINVAL;
@@ -322,7 +322,7 @@ static int inet6_pton(struct net *net, const char *src, u16 port_num,
{
struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)addr;
const char *scope_delim;
- int srclen = strlen(src);
+ size_t srclen = strlen(src);
if (srclen > INET6_ADDRSTRLEN)
return -EINVAL;
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index dc4fb699b56c..cec0632f96db 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -166,6 +166,7 @@ static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
[DCB_ATTR_IEEE_QCN] = {.len = sizeof(struct ieee_qcn)},
[DCB_ATTR_IEEE_QCN_STATS] = {.len = sizeof(struct ieee_qcn_stats)},
[DCB_ATTR_DCB_BUFFER] = {.len = sizeof(struct dcbnl_buffer)},
+ [DCB_ATTR_DCB_APP_TRUST_TABLE] = {.type = NLA_NESTED},
};
/* DCB number of traffic classes nested attributes. */
@@ -179,6 +180,38 @@ static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
static LIST_HEAD(dcb_app_list);
static DEFINE_SPINLOCK(dcb_lock);
+static enum ieee_attrs_app dcbnl_app_attr_type_get(u8 selector)
+{
+ switch (selector) {
+ case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
+ case IEEE_8021QAZ_APP_SEL_STREAM:
+ case IEEE_8021QAZ_APP_SEL_DGRAM:
+ case IEEE_8021QAZ_APP_SEL_ANY:
+ case IEEE_8021QAZ_APP_SEL_DSCP:
+ return DCB_ATTR_IEEE_APP;
+ case DCB_APP_SEL_PCP:
+ return DCB_ATTR_DCB_APP;
+ default:
+ return DCB_ATTR_IEEE_APP_UNSPEC;
+ }
+}
+
+static bool dcbnl_app_attr_type_validate(enum ieee_attrs_app type)
+{
+ switch (type) {
+ case DCB_ATTR_IEEE_APP:
+ case DCB_ATTR_DCB_APP:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool dcbnl_app_selector_validate(enum ieee_attrs_app type, u8 selector)
+{
+ return dcbnl_app_attr_type_get(selector) == type;
+}
+
static struct sk_buff *dcbnl_newmsg(int type, u8 cmd, u32 port, u32 seq,
u32 flags, struct nlmsghdr **nlhp)
{
@@ -1030,9 +1063,9 @@ nla_put_failure:
/* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb GET commands. */
static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
{
- struct nlattr *ieee, *app;
- struct dcb_app_type *itr;
const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
+ struct nlattr *ieee, *app, *apptrust;
+ struct dcb_app_type *itr;
int dcbx;
int err;
@@ -1116,8 +1149,9 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
spin_lock_bh(&dcb_lock);
list_for_each_entry(itr, &dcb_app_list, list) {
if (itr->ifindex == netdev->ifindex) {
- err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
- &itr->app);
+ enum ieee_attrs_app type =
+ dcbnl_app_attr_type_get(itr->app.selector);
+ err = nla_put(skb, type, sizeof(itr->app), &itr->app);
if (err) {
spin_unlock_bh(&dcb_lock);
return -EMSGSIZE;
@@ -1133,6 +1167,30 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
spin_unlock_bh(&dcb_lock);
nla_nest_end(skb, app);
+ if (ops->dcbnl_getapptrust) {
+ u8 selectors[IEEE_8021QAZ_APP_SEL_MAX + 1] = {0};
+ int nselectors, i;
+
+ apptrust = nla_nest_start(skb, DCB_ATTR_DCB_APP_TRUST_TABLE);
+ if (!apptrust)
+ return -EMSGSIZE;
+
+ err = ops->dcbnl_getapptrust(netdev, selectors, &nselectors);
+ if (!err) {
+ for (i = 0; i < nselectors; i++) {
+ enum ieee_attrs_app type =
+ dcbnl_app_attr_type_get(selectors[i]);
+ err = nla_put_u8(skb, type, selectors[i]);
+ if (err) {
+ nla_nest_cancel(skb, apptrust);
+ return err;
+ }
+ }
+ }
+
+ nla_nest_end(skb, apptrust);
+ }
+
/* get peer info if available */
if (ops->ieee_peer_getets) {
struct ieee_ets ets;
@@ -1493,9 +1551,10 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
int rem;
nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
+ enum ieee_attrs_app type = nla_type(attr);
struct dcb_app *app_data;
- if (nla_type(attr) != DCB_ATTR_IEEE_APP)
+ if (!dcbnl_app_attr_type_validate(type))
continue;
if (nla_len(attr) < sizeof(struct dcb_app)) {
@@ -1504,6 +1563,13 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
}
app_data = nla_data(attr);
+
+ if (!dcbnl_app_selector_validate(type,
+ app_data->selector)) {
+ err = -EINVAL;
+ goto err;
+ }
+
if (ops->ieee_setapp)
err = ops->ieee_setapp(netdev, app_data);
else
@@ -1513,6 +1579,53 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
}
}
+ if (ieee[DCB_ATTR_DCB_APP_TRUST_TABLE]) {
+ u8 selectors[IEEE_8021QAZ_APP_SEL_MAX + 1] = {0};
+ struct nlattr *attr;
+ int nselectors = 0;
+ int rem;
+
+ if (!ops->dcbnl_setapptrust) {
+ err = -EOPNOTSUPP;
+ goto err;
+ }
+
+ nla_for_each_nested(attr, ieee[DCB_ATTR_DCB_APP_TRUST_TABLE],
+ rem) {
+ enum ieee_attrs_app type = nla_type(attr);
+ u8 selector;
+ int i;
+
+ if (!dcbnl_app_attr_type_validate(type) ||
+ nla_len(attr) != 1 ||
+ nselectors >= sizeof(selectors)) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ selector = nla_get_u8(attr);
+
+ if (!dcbnl_app_selector_validate(type, selector)) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ /* Duplicate selector ? */
+ for (i = 0; i < nselectors; i++) {
+ if (selectors[i] == selector) {
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+ selectors[nselectors++] = selector;
+ }
+
+ err = ops->dcbnl_setapptrust(netdev, selectors, nselectors);
+ if (err)
+ goto err;
+ }
+
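The DCB_ATTR_DCB_APP_TRUST_TABLE handling just added validates the selector list and hands it to the driver through the dcbnl_setapptrust()/dcbnl_getapptrust() ops. A hypothetical driver-side sketch (struct foo_priv and its fields are invented for illustration; a real driver would also program the hardware trust order):

#include <linux/dcbnl.h>
#include <linux/netdevice.h>
#include <linux/string.h>

struct foo_priv {
	u8 trust_order[IEEE_8021QAZ_APP_SEL_MAX + 1];
	int trust_cnt;
};

static int foo_dcbnl_setapptrust(struct net_device *dev, u8 *selectors,
				 int nselectors)
{
	struct foo_priv *priv = netdev_priv(dev);

	memcpy(priv->trust_order, selectors, nselectors);
	priv->trust_cnt = nselectors;
	return 0;	/* hardware programming omitted */
}

static int foo_dcbnl_getapptrust(struct net_device *dev, u8 *selectors,
				 int *nselectors)
{
	struct foo_priv *priv = netdev_priv(dev);

	memcpy(selectors, priv->trust_order, priv->trust_cnt);
	*nselectors = priv->trust_cnt;
	return 0;
}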
err:
err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
@@ -1554,11 +1667,20 @@ static int dcbnl_ieee_del(struct net_device *netdev, struct nlmsghdr *nlh,
int rem;
nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
+ enum ieee_attrs_app type = nla_type(attr);
struct dcb_app *app_data;
- if (nla_type(attr) != DCB_ATTR_IEEE_APP)
+ if (!dcbnl_app_attr_type_validate(type))
continue;
+
app_data = nla_data(attr);
+
+ if (!dcbnl_app_selector_validate(type,
+ app_data->selector)) {
+ err = -EINVAL;
+ goto err;
+ }
+
if (ops->ieee_delapp)
err = ops->ieee_delapp(netdev, app_data);
else
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 7dfc00c9fb32..9ddc3a9e89e4 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -278,6 +278,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
const struct dccp_hdr *dh, const unsigned int len);
+void dccp_destruct_common(struct sock *sk);
int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized);
void dccp_destroy_sock(struct sock *sk);
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index e57b43006074..ae62b1591dea 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -1021,6 +1021,12 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
.sockaddr_len = sizeof(struct sockaddr_in6),
};
+static void dccp_v6_sk_destruct(struct sock *sk)
+{
+ dccp_destruct_common(sk);
+ inet6_sock_destruct(sk);
+}
+
/* NOTE: A lot of things set to zero explicitly by call to
* sk_alloc() so need not be done here.
*/
@@ -1033,17 +1039,12 @@ static int dccp_v6_init_sock(struct sock *sk)
if (unlikely(!dccp_v6_ctl_sock_initialized))
dccp_v6_ctl_sock_initialized = 1;
inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
+ sk->sk_destruct = dccp_v6_sk_destruct;
}
return err;
}
-static void dccp_v6_destroy_sock(struct sock *sk)
-{
- dccp_destroy_sock(sk);
- inet6_destroy_sock(sk);
-}
-
static struct timewait_sock_ops dccp6_timewait_sock_ops = {
.twsk_obj_size = sizeof(struct dccp6_timewait_sock),
};
@@ -1066,7 +1067,7 @@ static struct proto dccp_v6_prot = {
.accept = inet_csk_accept,
.get_port = inet_csk_get_port,
.shutdown = dccp_shutdown,
- .destroy = dccp_v6_destroy_sock,
+ .destroy = dccp_destroy_sock,
.orphan_count = &dccp_orphan_count,
.max_header = MAX_DCCP_HEADER,
.obj_size = sizeof(struct dccp6_sock),
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index c548ca3e9b0e..9494b0d224f9 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -171,12 +171,18 @@ const char *dccp_packet_name(const int type)
EXPORT_SYMBOL_GPL(dccp_packet_name);
-static void dccp_sk_destruct(struct sock *sk)
+void dccp_destruct_common(struct sock *sk)
{
struct dccp_sock *dp = dccp_sk(sk);
ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
dp->dccps_hc_tx_ccid = NULL;
+}
+EXPORT_SYMBOL_GPL(dccp_destruct_common);
+
+static void dccp_sk_destruct(struct sock *sk)
+{
+ dccp_destruct_common(sk);
inet_sock_destruct(sk);
}
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index e504a18fc125..b80dbd02e154 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -529,7 +529,6 @@ static void dsa_port_devlink_teardown(struct dsa_port *dp)
static int dsa_port_setup(struct dsa_port *dp)
{
- struct devlink_port *dlp = &dp->devlink_port;
bool dsa_port_link_registered = false;
struct dsa_switch *ds = dp->ds;
bool dsa_port_enabled = false;
@@ -585,10 +584,6 @@ static int dsa_port_setup(struct dsa_port *dp)
case DSA_PORT_TYPE_USER:
of_get_mac_address(dp->dn, dp->mac);
err = dsa_slave_create(dp);
- if (err)
- break;
-
- devlink_port_type_eth_set(dlp, dp->slave);
break;
}
@@ -608,13 +603,9 @@ static int dsa_port_setup(struct dsa_port *dp)
static void dsa_port_teardown(struct dsa_port *dp)
{
- struct devlink_port *dlp = &dp->devlink_port;
-
if (!dp->setup)
return;
- devlink_port_type_clear(dlp);
-
switch (dp->type) {
case DSA_PORT_TYPE_UNUSED:
break;
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index a9fde48cffd4..4176482cd03f 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -976,12 +976,12 @@ static void dsa_slave_get_ethtool_stats(struct net_device *dev,
s = per_cpu_ptr(dev->tstats, i);
do {
- start = u64_stats_fetch_begin_irq(&s->syncp);
+ start = u64_stats_fetch_begin(&s->syncp);
tx_packets = u64_stats_read(&s->tx_packets);
tx_bytes = u64_stats_read(&s->tx_bytes);
rx_packets = u64_stats_read(&s->rx_packets);
rx_bytes = u64_stats_read(&s->rx_bytes);
- } while (u64_stats_fetch_retry_irq(&s->syncp, start));
+ } while (u64_stats_fetch_retry(&s->syncp, start));
data[0] += tx_packets;
data[1] += tx_bytes;
data[2] += rx_packets;
@@ -2165,13 +2165,6 @@ static const struct dcbnl_rtnl_ops __maybe_unused dsa_slave_dcbnl_ops = {
.ieee_delapp = dsa_slave_dcbnl_ieee_delapp,
};
-static struct devlink_port *dsa_slave_get_devlink_port(struct net_device *dev)
-{
- struct dsa_port *dp = dsa_slave_to_port(dev);
-
- return &dp->devlink_port;
-}
-
static void dsa_slave_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *s)
{
@@ -2219,7 +2212,6 @@ static const struct net_device_ops dsa_slave_netdev_ops = {
.ndo_get_stats64 = dsa_slave_get_stats64,
.ndo_vlan_rx_add_vid = dsa_slave_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = dsa_slave_vlan_rx_kill_vid,
- .ndo_get_devlink_port = dsa_slave_get_devlink_port,
.ndo_change_mtu = dsa_slave_change_mtu,
.ndo_fill_forward_path = dsa_slave_fill_forward_path,
};
@@ -2406,6 +2398,7 @@ int dsa_slave_create(struct dsa_port *port)
SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
SET_NETDEV_DEV(slave_dev, port->ds->dev);
+ SET_NETDEV_DEVLINK_PORT(slave_dev, &port->devlink_port);
slave_dev->dev.of_node = port->dn;
slave_dev->vlan_features = master->vlan_features;
diff --git a/net/ethtool/channels.c b/net/ethtool/channels.c
index 403158862011..c7e37130647e 100644
--- a/net/ethtool/channels.c
+++ b/net/ethtool/channels.c
@@ -116,9 +116,10 @@ int ethnl_set_channels(struct sk_buff *skb, struct genl_info *info)
struct ethtool_channels channels = {};
struct ethnl_req_info req_info = {};
struct nlattr **tb = info->attrs;
- u32 err_attr, max_rx_in_use = 0;
+ u32 err_attr, max_rxfh_in_use;
const struct ethtool_ops *ops;
struct net_device *dev;
+ u64 max_rxnfc_in_use;
int ret;
ret = ethnl_parse_header_dev_get(&req_info,
@@ -189,15 +190,23 @@ int ethnl_set_channels(struct sk_buff *skb, struct genl_info *info)
}
/* ensure the new Rx count fits within the configured Rx flow
- * indirection table settings
+ * indirection table/rxnfc settings
*/
- if (netif_is_rxfh_configured(dev) &&
- !ethtool_get_max_rxfh_channel(dev, &max_rx_in_use) &&
- (channels.combined_count + channels.rx_count) <= max_rx_in_use) {
+ if (ethtool_get_max_rxnfc_channel(dev, &max_rxnfc_in_use))
+ max_rxnfc_in_use = 0;
+ if (!netif_is_rxfh_configured(dev) ||
+ ethtool_get_max_rxfh_channel(dev, &max_rxfh_in_use))
+ max_rxfh_in_use = 0;
+ if (channels.combined_count + channels.rx_count <= max_rxfh_in_use) {
ret = -EINVAL;
GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing indirection table settings");
goto out_ops;
}
+ if (channels.combined_count + channels.rx_count <= max_rxnfc_in_use) {
+ ret = -EINVAL;
+ GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing ntuple filter settings");
+ goto out_ops;
+ }
/* Disabling channels, query zero-copy AF_XDP sockets */
from_channel = channels.combined_count +
diff --git a/net/ethtool/common.c b/net/ethtool/common.c
index 566adf85e658..21cfe8557205 100644
--- a/net/ethtool/common.c
+++ b/net/ethtool/common.c
@@ -202,6 +202,12 @@ const char link_mode_names[][ETH_GSTRING_LEN] = {
__DEFINE_LINK_MODE_NAME(100, FX, Half),
__DEFINE_LINK_MODE_NAME(100, FX, Full),
__DEFINE_LINK_MODE_NAME(10, T1L, Full),
+ __DEFINE_LINK_MODE_NAME(800000, CR8, Full),
+ __DEFINE_LINK_MODE_NAME(800000, KR8, Full),
+ __DEFINE_LINK_MODE_NAME(800000, DR8, Full),
+ __DEFINE_LINK_MODE_NAME(800000, DR8_2, Full),
+ __DEFINE_LINK_MODE_NAME(800000, SR8, Full),
+ __DEFINE_LINK_MODE_NAME(800000, VR8, Full),
};
static_assert(ARRAY_SIZE(link_mode_names) == __ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -238,6 +244,8 @@ static_assert(ARRAY_SIZE(link_mode_names) == __ETHTOOL_LINK_MODE_MASK_NBITS);
#define __LINK_MODE_LANES_X 1
#define __LINK_MODE_LANES_FX 1
#define __LINK_MODE_LANES_T1L 1
+#define __LINK_MODE_LANES_VR8 8
+#define __LINK_MODE_LANES_DR8_2 8
#define __DEFINE_LINK_MODE_PARAMS(_speed, _type, _duplex) \
[ETHTOOL_LINK_MODE(_speed, _type, _duplex)] = { \
@@ -352,6 +360,12 @@ const struct link_mode_info link_mode_params[] = {
__DEFINE_LINK_MODE_PARAMS(100, FX, Half),
__DEFINE_LINK_MODE_PARAMS(100, FX, Full),
__DEFINE_LINK_MODE_PARAMS(10, T1L, Full),
+ __DEFINE_LINK_MODE_PARAMS(800000, CR8, Full),
+ __DEFINE_LINK_MODE_PARAMS(800000, KR8, Full),
+ __DEFINE_LINK_MODE_PARAMS(800000, DR8, Full),
+ __DEFINE_LINK_MODE_PARAMS(800000, DR8_2, Full),
+ __DEFINE_LINK_MODE_PARAMS(800000, SR8, Full),
+ __DEFINE_LINK_MODE_PARAMS(800000, VR8, Full),
};
static_assert(ARRAY_SIZE(link_mode_params) == __ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -498,6 +512,72 @@ int __ethtool_get_link(struct net_device *dev)
return netif_running(dev) && dev->ethtool_ops->get_link(dev);
}
+static int ethtool_get_rxnfc_rule_count(struct net_device *dev)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct ethtool_rxnfc info = {
+ .cmd = ETHTOOL_GRXCLSRLCNT,
+ };
+ int err;
+
+ err = ops->get_rxnfc(dev, &info, NULL);
+ if (err)
+ return err;
+
+ return info.rule_cnt;
+}
+
+int ethtool_get_max_rxnfc_channel(struct net_device *dev, u64 *max)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct ethtool_rxnfc *info;
+ int err, i, rule_cnt;
+ u64 max_ring = 0;
+
+ if (!ops->get_rxnfc)
+ return -EOPNOTSUPP;
+
+ rule_cnt = ethtool_get_rxnfc_rule_count(dev);
+ if (rule_cnt <= 0)
+ return -EINVAL;
+
+ info = kvzalloc(struct_size(info, rule_locs, rule_cnt), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->cmd = ETHTOOL_GRXCLSRLALL;
+ info->rule_cnt = rule_cnt;
+ err = ops->get_rxnfc(dev, info, info->rule_locs);
+ if (err)
+ goto err_free_info;
+
+ for (i = 0; i < rule_cnt; i++) {
+ struct ethtool_rxnfc rule_info = {
+ .cmd = ETHTOOL_GRXCLSRULE,
+ .fs.location = info->rule_locs[i],
+ };
+
+ err = ops->get_rxnfc(dev, &rule_info, NULL);
+ if (err)
+ goto err_free_info;
+
+ if (rule_info.fs.ring_cookie != RX_CLS_FLOW_DISC &&
+ rule_info.fs.ring_cookie != RX_CLS_FLOW_WAKE &&
+ !(rule_info.flow_type & FLOW_RSS) &&
+ !ethtool_get_flow_spec_ring_vf(rule_info.fs.ring_cookie))
+ max_ring =
+ max_t(u64, max_ring, rule_info.fs.ring_cookie);
+ }
+
+ kvfree(info);
+ *max = max_ring;
+ return 0;
+
+err_free_info:
+ kvfree(info);
+ return err;
+}
+
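ethtool_get_max_rxnfc_channel() above walks every installed classification rule and records the highest RX ring any rule steers to, so a later channel reduction cannot leave a rule pointing at a ring that no longer exists. For reference, a user-space sketch of the same first step, querying the rule count with ETHTOOL_GRXCLSRLCNT over the legacy ioctl (fd is any socket; error handling trimmed):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int ntuple_rule_count(int fd, const char *ifname)
{
	struct ethtool_rxnfc info = { .cmd = ETHTOOL_GRXCLSRLCNT };
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&info;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return -1;
	return info.rule_cnt;
}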
int ethtool_get_max_rxfh_channel(struct net_device *dev, u32 *max)
{
u32 dev_size, current_max = 0;
diff --git a/net/ethtool/common.h b/net/ethtool/common.h
index c1779657e074..b1b9db810eca 100644
--- a/net/ethtool/common.h
+++ b/net/ethtool/common.h
@@ -43,6 +43,7 @@ bool convert_legacy_settings_to_link_ksettings(
struct ethtool_link_ksettings *link_ksettings,
const struct ethtool_cmd *legacy_settings);
int ethtool_get_max_rxfh_channel(struct net_device *dev, u32 *max);
+int ethtool_get_max_rxnfc_channel(struct net_device *dev, u64 *max);
int __ethtool_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info);
extern const struct ethtool_phy_ops *ethtool_phy_ops;
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 57e7238a4136..99272a67525c 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -44,16 +44,9 @@ struct ethtool_devlink_compat {
static struct devlink *netdev_to_devlink_get(struct net_device *dev)
{
- struct devlink_port *devlink_port;
-
- if (!dev->netdev_ops->ndo_get_devlink_port)
- return NULL;
-
- devlink_port = dev->netdev_ops->ndo_get_devlink_port(dev);
- if (!devlink_port)
+ if (!dev->devlink_port)
return NULL;
-
- return devlink_try_get(devlink_port->devlink);
+ return devlink_try_get(dev->devlink_port->devlink);
}
/*
@@ -713,15 +706,22 @@ static int
ethtool_get_drvinfo(struct net_device *dev, struct ethtool_devlink_compat *rsp)
{
const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct device *parent = dev->dev.parent;
rsp->info.cmd = ETHTOOL_GDRVINFO;
strscpy(rsp->info.version, UTS_RELEASE, sizeof(rsp->info.version));
if (ops->get_drvinfo) {
ops->get_drvinfo(dev, &rsp->info);
- } else if (dev->dev.parent && dev->dev.parent->driver) {
- strscpy(rsp->info.bus_info, dev_name(dev->dev.parent),
+ if (!rsp->info.bus_info[0] && parent)
+ strscpy(rsp->info.bus_info, dev_name(parent),
+ sizeof(rsp->info.bus_info));
+ if (!rsp->info.driver[0] && parent && parent->driver)
+ strscpy(rsp->info.driver, parent->driver->name,
+ sizeof(rsp->info.driver));
+ } else if (parent && parent->driver) {
+ strscpy(rsp->info.bus_info, dev_name(parent),
sizeof(rsp->info.bus_info));
- strscpy(rsp->info.driver, dev->dev.parent->driver->name,
+ strscpy(rsp->info.driver, parent->driver->name,
sizeof(rsp->info.driver));
} else if (dev->rtnl_link_ops) {
strscpy(rsp->info.driver, dev->rtnl_link_ops->kind,
@@ -1796,7 +1796,8 @@ static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
{
struct ethtool_channels channels, curr = { .cmd = ETHTOOL_GCHANNELS };
u16 from_channel, to_channel;
- u32 max_rx_in_use = 0;
+ u64 max_rxnfc_in_use;
+ u32 max_rxfh_in_use;
unsigned int i;
int ret;
@@ -1827,11 +1828,15 @@ static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
return -EINVAL;
/* ensure the new Rx count fits within the configured Rx flow
- * indirection table settings */
- if (netif_is_rxfh_configured(dev) &&
- !ethtool_get_max_rxfh_channel(dev, &max_rx_in_use) &&
- (channels.combined_count + channels.rx_count) <= max_rx_in_use)
- return -EINVAL;
+ * indirection table/rxnfc settings */
+ if (ethtool_get_max_rxnfc_channel(dev, &max_rxnfc_in_use))
+ max_rxnfc_in_use = 0;
+ if (!netif_is_rxfh_configured(dev) ||
+ ethtool_get_max_rxfh_channel(dev, &max_rxfh_in_use))
+ max_rxfh_in_use = 0;
+ if (channels.combined_count + channels.rx_count <=
+ max_t(u64, max_rxnfc_in_use, max_rxfh_in_use))
+ return -EINVAL;
/* Disabling channels, query zero-copy AF_XDP sockets */
from_channel = channels.combined_count +
diff --git a/net/ethtool/linkstate.c b/net/ethtool/linkstate.c
index fb676f349455..2158c17a0b32 100644
--- a/net/ethtool/linkstate.c
+++ b/net/ethtool/linkstate.c
@@ -13,6 +13,7 @@ struct linkstate_reply_data {
int link;
int sqi;
int sqi_max;
+ struct ethtool_link_ext_stats link_stats;
bool link_ext_state_provided;
struct ethtool_link_ext_state_info ethtool_link_ext_state_info;
};
@@ -22,7 +23,7 @@ struct linkstate_reply_data {
const struct nla_policy ethnl_linkstate_get_policy[] = {
[ETHTOOL_A_LINKSTATE_HEADER] =
- NLA_POLICY_NESTED(ethnl_header_policy),
+ NLA_POLICY_NESTED(ethnl_header_policy_stats),
};
static int linkstate_get_sqi(struct net_device *dev)
@@ -107,6 +108,19 @@ static int linkstate_prepare_data(const struct ethnl_req_info *req_base,
goto out;
}
+ ethtool_stats_init((u64 *)&data->link_stats,
+ sizeof(data->link_stats) / 8);
+
+ if (req_base->flags & ETHTOOL_FLAG_STATS) {
+ if (dev->phydev)
+ data->link_stats.link_down_events =
+ READ_ONCE(dev->phydev->link_down_events);
+
+ if (dev->ethtool_ops->get_link_ext_stats)
+ dev->ethtool_ops->get_link_ext_stats(dev,
+ &data->link_stats);
+ }
+
ret = 0;
out:
ethnl_ops_complete(dev);
@@ -134,6 +148,9 @@ static int linkstate_reply_size(const struct ethnl_req_info *req_base,
if (data->ethtool_link_ext_state_info.__link_ext_substate)
len += nla_total_size(sizeof(u8)); /* LINKSTATE_EXT_SUBSTATE */
+ if (data->link_stats.link_down_events != ETHTOOL_STAT_NOT_SET)
+ len += nla_total_size(sizeof(u32));
+
return len;
}
@@ -166,6 +183,11 @@ static int linkstate_fill_reply(struct sk_buff *skb,
return -EMSGSIZE;
}
+ if (data->link_stats.link_down_events != ETHTOOL_STAT_NOT_SET)
+ if (nla_put_u32(skb, ETHTOOL_A_LINKSTATE_EXT_DOWN_CNT,
+ data->link_stats.link_down_events))
+ return -EMSGSIZE;
+
return 0;
}
diff --git a/net/ieee802154/core.c b/net/ieee802154/core.c
index de259b5170ab..57546e07e06a 100644
--- a/net/ieee802154/core.c
+++ b/net/ieee802154/core.c
@@ -129,6 +129,9 @@ wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size)
wpan_phy_net_set(&rdev->wpan_phy, &init_net);
init_waitqueue_head(&rdev->dev_wait);
+ init_waitqueue_head(&rdev->wpan_phy.sync_txq);
+
+ spin_lock_init(&rdev->wpan_phy.queue_lock);
return &rdev->wpan_phy;
}
diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
index 38c4f3cb010e..b33d1b5eda87 100644
--- a/net/ieee802154/nl802154.c
+++ b/net/ieee802154/nl802154.c
@@ -2157,7 +2157,8 @@ static int nl802154_del_llsec_seclevel(struct sk_buff *skb,
#define NL802154_FLAG_CHECK_NETDEV_UP 0x08
#define NL802154_FLAG_NEED_WPAN_DEV 0x10
-static int nl802154_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
+static int nl802154_pre_doit(const struct genl_split_ops *ops,
+ struct sk_buff *skb,
struct genl_info *info)
{
struct cfg802154_registered_device *rdev;
@@ -2219,7 +2220,8 @@ static int nl802154_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
return 0;
}
-static void nl802154_post_doit(const struct genl_ops *ops, struct sk_buff *skb,
+static void nl802154_post_doit(const struct genl_split_ops *ops,
+ struct sk_buff *skb,
struct genl_info *info)
{
if (info->user_ptr[1]) {
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index bbdd9c44f14e..af7d2cf490fb 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -10,7 +10,7 @@ obj-y := route.o inetpeer.o protocol.o \
tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o \
tcp_minisocks.o tcp_cong.o tcp_metrics.o tcp_fastopen.o \
tcp_rate.o tcp_recovery.o tcp_ulp.o \
- tcp_offload.o datagram.o raw.o udp.o udplite.o \
+ tcp_offload.o tcp_plb.o datagram.o raw.o udp.o udplite.o \
udp_offload.o arp.o icmp.o devinet.o af_inet.o igmp.o \
fib_frontend.o fib_semantics.o fib_trie.o fib_notifier.o \
inet_fragment.o ping.o ip_tunnel_core.o gre_offload.o \
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 4728087c42a5..378bcd777514 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1708,9 +1708,9 @@ u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offt,
bhptr = per_cpu_ptr(mib, cpu);
syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
do {
- start = u64_stats_fetch_begin_irq(syncp);
+ start = u64_stats_fetch_begin(syncp);
v = *(((u64 *)bhptr) + offt);
- } while (u64_stats_fetch_retry_irq(syncp, start));
+ } while (u64_stats_fetch_retry(syncp, start));
return v;
}
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index c9f9ac5013a7..7072fc0783ef 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -133,6 +133,7 @@ static void inet_frags_free_cb(void *ptr, void *arg)
count = del_timer_sync(&fq->timer) ? 1 : 0;
spin_lock_bh(&fq->lock);
+ fq->flags |= INET_FRAG_DROP;
if (!(fq->flags & INET_FRAG_COMPLETE)) {
fq->flags |= INET_FRAG_COMPLETE;
count++;
@@ -260,7 +261,8 @@ static void inet_frag_destroy_rcu(struct rcu_head *head)
kmem_cache_free(f->frags_cachep, q);
}
-unsigned int inet_frag_rbtree_purge(struct rb_root *root)
+unsigned int inet_frag_rbtree_purge(struct rb_root *root,
+ enum skb_drop_reason reason)
{
struct rb_node *p = rb_first(root);
unsigned int sum = 0;
@@ -274,7 +276,7 @@ unsigned int inet_frag_rbtree_purge(struct rb_root *root)
struct sk_buff *next = FRAG_CB(skb)->next_frag;
sum += skb->truesize;
- kfree_skb(skb);
+ kfree_skb_reason(skb, reason);
skb = next;
}
}
@@ -284,17 +286,21 @@ EXPORT_SYMBOL(inet_frag_rbtree_purge);
void inet_frag_destroy(struct inet_frag_queue *q)
{
- struct fqdir *fqdir;
unsigned int sum, sum_truesize = 0;
+ enum skb_drop_reason reason;
struct inet_frags *f;
+ struct fqdir *fqdir;
WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
+ reason = (q->flags & INET_FRAG_DROP) ?
+ SKB_DROP_REASON_FRAG_REASM_TIMEOUT :
+ SKB_CONSUMED;
WARN_ON(del_timer(&q->timer) != 0);
/* Release all fragment data. */
fqdir = q->fqdir;
f = fqdir->f;
- sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments);
+ sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments, reason);
sum = sum_truesize + f->qsize;
call_rcu(&q->rcu, inet_frag_destroy_rcu);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index fb153569889e..69c00ffdcf3e 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -153,6 +153,7 @@ static void ip_expire(struct timer_list *t)
if (qp->q.flags & INET_FRAG_COMPLETE)
goto out;
+ qp->q.flags |= INET_FRAG_DROP;
ipq_kill(qp);
__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
__IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);
@@ -194,7 +195,7 @@ out:
spin_unlock(&qp->q.lock);
out_rcu_unlock:
rcu_read_unlock();
- kfree_skb(head);
+ kfree_skb_reason(head, SKB_DROP_REASON_FRAG_REASM_TIMEOUT);
ipq_put(qp);
}
@@ -254,7 +255,8 @@ static int ip_frag_reinit(struct ipq *qp)
return -ETIMEDOUT;
}
- sum_truesize = inet_frag_rbtree_purge(&qp->q.rb_fragments);
+ sum_truesize = inet_frag_rbtree_purge(&qp->q.rb_fragments,
+ SKB_DROP_REASON_FRAG_TOO_FAR);
sub_frag_mem_limit(qp->q.fqdir, sum_truesize);
qp->q.flags = 0;
@@ -278,10 +280,14 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
struct net_device *dev;
unsigned int fragsize;
int err = -ENOENT;
+ SKB_DR(reason);
u8 ecn;
- if (qp->q.flags & INET_FRAG_COMPLETE)
+ /* If reassembly is already done, @skb must be a duplicate frag. */
+ if (qp->q.flags & INET_FRAG_COMPLETE) {
+ SKB_DR_SET(reason, DUP_FRAG);
goto err;
+ }
if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
unlikely(ip_frag_too_far(qp)) &&
@@ -382,8 +388,9 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
insert_error:
if (err == IPFRAG_DUP) {
- kfree_skb(skb);
- return -EINVAL;
+ SKB_DR_SET(reason, DUP_FRAG);
+ err = -EINVAL;
+ goto err;
}
err = -EINVAL;
__IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
@@ -391,7 +398,7 @@ discard_qp:
inet_frag_kill(&qp->q);
__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
err:
- kfree_skb(skb);
+ kfree_skb_reason(skb, reason);
return err;
}
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index f866d6282b2b..d8ee5238c395 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1665,7 +1665,7 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
if (err)
goto out;
- err = rtnl_configure_link(dev, NULL);
+ err = rtnl_configure_link(dev, NULL, 0, NULL);
if (err < 0)
goto out;
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 6e19cad154f5..9f92ae35bb01 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -267,7 +267,7 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
}
#endif
if (cmsg->cmsg_level == SOL_SOCKET) {
- err = __sock_cmsg_send(sk, msg, cmsg, &ipc->sockc);
+ err = __sock_cmsg_send(sk, cmsg, &ipc->sockc);
if (err)
return err;
continue;
@@ -433,6 +433,7 @@ void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
}
kfree_skb(skb);
}
+EXPORT_SYMBOL_GPL(ip_icmp_error);
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 info)
{
diff --git a/net/ipv4/metrics.c b/net/ipv4/metrics.c
index 25ea6ac44db9..7fcfdfd8f9de 100644
--- a/net/ipv4/metrics.c
+++ b/net/ipv4/metrics.c
@@ -14,9 +14,6 @@ static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx,
struct nlattr *nla;
int remaining;
- if (!fc_mx)
- return 0;
-
nla_for_each_attr(nla, fc_mx, fc_mx_len, remaining) {
int type = nla_type(nla);
u32 val;
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 5386f460bd20..f88daace9de3 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -297,6 +297,7 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPDSACKIgnoredDubious", LINUX_MIB_TCPDSACKIGNOREDDUBIOUS),
SNMP_MIB_ITEM("TCPMigrateReqSuccess", LINUX_MIB_TCPMIGRATEREQSUCCESS),
SNMP_MIB_ITEM("TCPMigrateReqFailure", LINUX_MIB_TCPMIGRATEREQFAILURE),
+ SNMP_MIB_ITEM("TCPPLBRehash", LINUX_MIB_TCPPLBREHASH),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 9b8a6db7a66b..0af28cedd071 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -40,6 +40,8 @@ static int one_day_secs = 24 * 3600;
static u32 fib_multipath_hash_fields_all_mask __maybe_unused =
FIB_MULTIPATH_HASH_FIELD_ALL_MASK;
static unsigned int tcp_child_ehash_entries_max = 16 * 1024 * 1024;
+static int tcp_plb_max_rounds = 31;
+static int tcp_plb_max_cong_thresh = 256;
/* obsolete */
static int sysctl_tcp_low_latency __read_mostly;
@@ -1384,6 +1386,47 @@ static struct ctl_table ipv4_net_table[] = {
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_TWO,
},
+ {
+ .procname = "tcp_plb_enabled",
+ .data = &init_net.ipv4.sysctl_tcp_plb_enabled,
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ {
+ .procname = "tcp_plb_idle_rehash_rounds",
+ .data = &init_net.ipv4.sysctl_tcp_plb_idle_rehash_rounds,
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ .extra2 = &tcp_plb_max_rounds,
+ },
+ {
+ .procname = "tcp_plb_rehash_rounds",
+ .data = &init_net.ipv4.sysctl_tcp_plb_rehash_rounds,
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ .extra2 = &tcp_plb_max_rounds,
+ },
+ {
+ .procname = "tcp_plb_suspend_rto_sec",
+ .data = &init_net.ipv4.sysctl_tcp_plb_suspend_rto_sec,
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ },
+ {
+ .procname = "tcp_plb_cong_thresh",
+ .data = &init_net.ipv4.sysctl_tcp_plb_cong_thresh,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = &tcp_plb_max_cong_thresh,
+ },
{ }
};
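The five entries added above expose the new TCP Protective Load Balancing knobs per network namespace under /proc/sys/net/ipv4/. A small sketch of toggling them from user space (the value 128 is only an example, i.e. half of the 0..256 range that tcp_plb_cong_thresh accepts):

#include <stdio.h>

static int write_sysctl(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	write_sysctl("/proc/sys/net/ipv4/tcp_plb_enabled", "1");
	write_sysctl("/proc/sys/net/ipv4/tcp_plb_cong_thresh", "128");
	return 0;
}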
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 54836a6b81d6..4a69c5fcfedc 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3176,6 +3176,7 @@ int tcp_disconnect(struct sock *sk, int flags)
tp->sacked_out = 0;
tp->tlp_high_seq = 0;
tp->last_oow_ack_time = 0;
+ tp->plb_rehash = 0;
/* There's a bubble in the pipe until at least the first ACK. */
tp->app_limited = ~0U;
tp->rack.mstamp = 0;
@@ -3939,6 +3940,8 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
info->tcpi_reord_seen = tp->reord_seen;
info->tcpi_rcv_ooopack = tp->rcv_ooopack;
info->tcpi_snd_wnd = tp->snd_wnd;
+ info->tcpi_rcv_wnd = tp->rcv_wnd;
+ info->tcpi_rehash = tp->plb_rehash + tp->timeout_rehash;
info->tcpi_fastopen_client_fail = tp->fastopen_client_fail;
unlock_sock_fast(sk, slow);
}
@@ -3973,6 +3976,7 @@ static size_t tcp_opt_stats_get_size(void)
nla_total_size(sizeof(u32)) + /* TCP_NLA_BYTES_NOTSENT */
nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_EDT */
nla_total_size(sizeof(u8)) + /* TCP_NLA_TTL */
+ nla_total_size(sizeof(u32)) + /* TCP_NLA_REHASH */
0;
}
@@ -4049,6 +4053,7 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk,
nla_put_u8(stats, TCP_NLA_TTL,
tcp_skb_ttl_or_hop_limit(ack_skb));
+ nla_put_u32(stats, TCP_NLA_REHASH, tp->plb_rehash + tp->timeout_rehash);
return stats;
}
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index 2a6c0dd665a4..e0a2ca7456ff 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -54,6 +54,7 @@ struct dctcp {
u32 next_seq;
u32 ce_state;
u32 loss_cwnd;
+ struct tcp_plb_state plb;
};
static unsigned int dctcp_shift_g __read_mostly = 4; /* g = 1/2^4 */
@@ -91,6 +92,8 @@ static void dctcp_init(struct sock *sk)
ca->ce_state = 0;
dctcp_reset(tp, ca);
+ tcp_plb_init(sk, &ca->plb);
+
return;
}
@@ -117,14 +120,28 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
/* Expired RTT */
if (!before(tp->snd_una, ca->next_seq)) {
+ u32 delivered = tp->delivered - ca->old_delivered;
u32 delivered_ce = tp->delivered_ce - ca->old_delivered_ce;
u32 alpha = ca->dctcp_alpha;
+ u32 ce_ratio = 0;
+
+ if (delivered > 0) {
+ /* dctcp_alpha keeps EWMA of fraction of ECN marked
+ * packets. Because of EWMA smoothing, PLB reaction can
+ * be slow so we use ce_ratio which is an instantaneous
+ * measure of congestion. ce_ratio is the fraction of
+ * ECN marked packets in the previous RTT.
+ */
+ if (delivered_ce > 0)
+ ce_ratio = (delivered_ce << TCP_PLB_SCALE) / delivered;
+ tcp_plb_update_state(sk, &ca->plb, (int)ce_ratio);
+ tcp_plb_check_rehash(sk, &ca->plb);
+ }
/* alpha = (1 - g) * alpha + g * F */
alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g);
if (delivered_ce) {
- u32 delivered = tp->delivered - ca->old_delivered;
/* If dctcp_shift_g == 1, a 32bit value would overflow
* after 8 M packets.
@@ -172,8 +189,12 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state);
break;
case CA_EVENT_LOSS:
+ tcp_plb_update_state_upon_rto(sk, &ca->plb);
dctcp_react_to_loss(sk);
break;
+ case CA_EVENT_TX_START:
+ tcp_plb_check_rehash(sk, &ca->plb); /* Maybe rehash when inflight is 0 */
+ break;
default:
/* Don't care for the rest. */
break;
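The ce_ratio computation added above feeds PLB an instantaneous congestion signal instead of the EWMA-smoothed dctcp_alpha. A worked example of the fixed-point arithmetic, assuming TCP_PLB_SCALE is 8 (which matches the 0..256 range of tcp_plb_cong_thresh in the sysctl table):

/* 30 CE-marked packets out of 100 delivered in the last RTT:
 * (30 << 8) / 100 = 76, i.e. roughly 30% congestion on the 0..256 scale
 * that tcp_plb_cong_thresh is compared against.
 */
static unsigned int plb_ce_ratio(unsigned int delivered_ce, unsigned int delivered)
{
	return delivered ? (delivered_ce << 8) / delivered : 0;
}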
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 0640453fce54..d764b5854dfc 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4764,8 +4764,8 @@ static void tcp_ofo_queue(struct sock *sk)
}
}
-static bool tcp_prune_ofo_queue(struct sock *sk);
-static int tcp_prune_queue(struct sock *sk);
+static bool tcp_prune_ofo_queue(struct sock *sk, const struct sk_buff *in_skb);
+static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb);
static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
unsigned int size)
@@ -4773,11 +4773,11 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
!sk_rmem_schedule(sk, skb, size)) {
- if (tcp_prune_queue(sk) < 0)
+ if (tcp_prune_queue(sk, skb) < 0)
return -1;
while (!sk_rmem_schedule(sk, skb, size)) {
- if (!tcp_prune_ofo_queue(sk))
+ if (!tcp_prune_ofo_queue(sk, skb))
return -1;
}
}
@@ -5329,6 +5329,8 @@ new_range:
* Clean the out-of-order queue to make room.
* We drop high sequences packets to :
* 1) Let a chance for holes to be filled.
+ * This means we do not drop packets from the ooo queue if their
+ * sequence is before the incoming packet's sequence.
* 2) not add too big latencies if thousands of packets sit there.
* (But if application shrinks SO_RCVBUF, we could still end up
* freeing whole queue here)
@@ -5336,24 +5338,31 @@ new_range:
*
* Return true if queue has shrunk.
*/
-static bool tcp_prune_ofo_queue(struct sock *sk)
+static bool tcp_prune_ofo_queue(struct sock *sk, const struct sk_buff *in_skb)
{
struct tcp_sock *tp = tcp_sk(sk);
struct rb_node *node, *prev;
+ bool pruned = false;
int goal;
if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
return false;
- NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
goal = sk->sk_rcvbuf >> 3;
node = &tp->ooo_last_skb->rbnode;
+
do {
+ struct sk_buff *skb = rb_to_skb(node);
+
+ /* If incoming skb would land last in ofo queue, stop pruning. */
+ if (after(TCP_SKB_CB(in_skb)->seq, TCP_SKB_CB(skb)->seq))
+ break;
+ pruned = true;
prev = rb_prev(node);
rb_erase(node, &tp->out_of_order_queue);
- goal -= rb_to_skb(node)->truesize;
- tcp_drop_reason(sk, rb_to_skb(node),
- SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE);
+ goal -= skb->truesize;
+ tcp_drop_reason(sk, skb, SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE);
+ tp->ooo_last_skb = rb_to_skb(prev);
if (!prev || goal <= 0) {
if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
!tcp_under_memory_pressure(sk))
@@ -5362,16 +5371,18 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
}
node = prev;
} while (node);
- tp->ooo_last_skb = rb_to_skb(prev);
- /* Reset SACK state. A conforming SACK implementation will
- * do the same at a timeout based retransmit. When a connection
- * is in a sad state like this, we care only about integrity
- * of the connection not performance.
- */
- if (tp->rx_opt.sack_ok)
- tcp_sack_reset(&tp->rx_opt);
- return true;
+ if (pruned) {
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
+ /* Reset SACK state. A conforming SACK implementation will
+ * do the same at a timeout based retransmit. When a connection
+ * is in a sad state like this, we care only about integrity
+ * of the connection not performance.
+ */
+ if (tp->rx_opt.sack_ok)
+ tcp_sack_reset(&tp->rx_opt);
+ }
+ return pruned;
}
/* Reduce allocated memory if we can, trying to get
@@ -5381,7 +5392,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
* until the socket owning process reads some of the data
* to stabilize the situation.
*/
-static int tcp_prune_queue(struct sock *sk)
+static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -5408,7 +5419,7 @@ static int tcp_prune_queue(struct sock *sk)
/* Collapsing did not help, destructive actions follow.
* This must not ever occur. */
- tcp_prune_ofo_queue(sk);
+ tcp_prune_ofo_queue(sk, in_skb);
if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
return 0;
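
The new early exit in tcp_prune_ofo_queue() relies on the usual wrap-safe sequence comparison, so it keeps working across 32-bit sequence-number wrap. Below is a small userspace sketch, not part of the patch, with the before()/after() helpers re-implemented locally for illustration:

#include <stdio.h>
#include <stdint.h>

/* wrap-safe comparison in the style of before()/after() from net/tcp.h */
static int seq_before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

#define seq_after(seq2, seq1)	seq_before(seq1, seq2)

int main(void)
{
	uint32_t last_ofo_seq = 0xfffffff0u;	/* highest sequence already queued */
	uint32_t in_seq = 0x00000010u;		/* incoming skb, just past the wrap */

	/* The incoming skb would land last in the ofo queue, so pruning
	 * lower-sequence data to make room for it is pointless; the loop
	 * above breaks in exactly this case.
	 */
	printf("stop pruning: %d\n", seq_after(in_seq, last_ofo_seq));	/* prints 1 */
	return 0;
}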
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 87d440f47a70..ebab9e8b184c 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -3218,6 +3218,14 @@ static int __net_init tcp_sk_init(struct net *net)
net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 0;
atomic_set(&net->ipv4.tfo_active_disable_times, 0);
+ /* Set default values for PLB */
+ net->ipv4.sysctl_tcp_plb_enabled = 0; /* Disabled by default */
+ net->ipv4.sysctl_tcp_plb_idle_rehash_rounds = 3;
+ net->ipv4.sysctl_tcp_plb_rehash_rounds = 12;
+ net->ipv4.sysctl_tcp_plb_suspend_rto_sec = 60;
+ /* Default congestion threshold for PLB to mark a round is 50% */
+ net->ipv4.sysctl_tcp_plb_cong_thresh = (1 << TCP_PLB_SCALE) / 2;
+
/* Reno is always built in */
if (!net_eq(net, &init_net) &&
bpf_try_module_get(init_net.ipv4.tcp_congestion_control,
diff --git a/net/ipv4/tcp_plb.c b/net/ipv4/tcp_plb.c
new file mode 100644
index 000000000000..bb1a08fda113
--- /dev/null
+++ b/net/ipv4/tcp_plb.c
@@ -0,0 +1,109 @@
+/* Protective Load Balancing (PLB)
+ *
+ * PLB was designed to reduce link load imbalance across datacenter
+ * switches. PLB is a host-based optimization; it leverages congestion
+ * signals from the transport layer to randomly change the path of the
+ * connection experiencing sustained congestion. PLB prefers to repath
+ * after idle periods to minimize packet reordering. It repaths by
+ * changing the IPv6 Flow Label on the packets of a connection, which
+ * datacenter switches include as part of ECMP/WCMP hashing.
+ *
+ * PLB is described in detail in:
+ *
+ * Mubashir Adnan Qureshi, Yuchung Cheng, Qianwen Yin, Qiaobin Fu,
+ * Gautam Kumar, Masoud Moshref, Junhua Yan, Van Jacobson,
+ * David Wetherall, Abdul Kabbani:
+ * "PLB: Congestion Signals are Simple and Effective for
+ * Network Load Balancing"
+ * In ACM SIGCOMM 2022, Amsterdam, Netherlands.
+ *
+ */
+
+#include <net/tcp.h>
+
+/* Called once per round-trip to update PLB state for a connection. */
+void tcp_plb_update_state(const struct sock *sk, struct tcp_plb_state *plb,
+ const int cong_ratio)
+{
+ struct net *net = sock_net(sk);
+
+ if (!READ_ONCE(net->ipv4.sysctl_tcp_plb_enabled))
+ return;
+
+ if (cong_ratio >= 0) {
+ if (cong_ratio < READ_ONCE(net->ipv4.sysctl_tcp_plb_cong_thresh))
+ plb->consec_cong_rounds = 0;
+ else if (plb->consec_cong_rounds <
+ READ_ONCE(net->ipv4.sysctl_tcp_plb_rehash_rounds))
+ plb->consec_cong_rounds++;
+ }
+}
+EXPORT_SYMBOL_GPL(tcp_plb_update_state);
+
+/* Check whether recent congestion has been persistent enough to warrant
+ * a load balancing decision that switches the connection to another path.
+ */
+void tcp_plb_check_rehash(struct sock *sk, struct tcp_plb_state *plb)
+{
+ struct net *net = sock_net(sk);
+ u32 max_suspend;
+ bool forced_rehash = false, idle_rehash = false;
+
+ if (!READ_ONCE(net->ipv4.sysctl_tcp_plb_enabled))
+ return;
+
+ forced_rehash = plb->consec_cong_rounds >=
+ READ_ONCE(net->ipv4.sysctl_tcp_plb_rehash_rounds);
+ /* If sender goes idle then we check whether to rehash. */
+ idle_rehash = READ_ONCE(net->ipv4.sysctl_tcp_plb_idle_rehash_rounds) &&
+ !tcp_sk(sk)->packets_out &&
+ plb->consec_cong_rounds >=
+ READ_ONCE(net->ipv4.sysctl_tcp_plb_idle_rehash_rounds);
+
+ if (!forced_rehash && !idle_rehash)
+ return;
+
+ /* Note that tcp_jiffies32 can wrap; we detect wraps by checking for
+ * cases where the max suspension end is before the actual suspension
+ * end. We clear pause_until to 0 to indicate there is no recent
+ * RTO event that constrains PLB rehashing.
+ */
+ max_suspend = 2 * READ_ONCE(net->ipv4.sysctl_tcp_plb_suspend_rto_sec) * HZ;
+ if (plb->pause_until &&
+ (!before(tcp_jiffies32, plb->pause_until) ||
+ before(tcp_jiffies32 + max_suspend, plb->pause_until)))
+ plb->pause_until = 0;
+
+ if (plb->pause_until)
+ return;
+
+ sk_rethink_txhash(sk);
+ plb->consec_cong_rounds = 0;
+ tcp_sk(sk)->plb_rehash++;
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPLBREHASH);
+}
+EXPORT_SYMBOL_GPL(tcp_plb_check_rehash);
+
+/* Upon RTO, disallow load balancing for a while, to avoid having load
+ * balancing decisions switch traffic to a black-holed path that was
+ * previously avoided with a sk_rethink_txhash() call at RTO time.
+ */
+void tcp_plb_update_state_upon_rto(struct sock *sk, struct tcp_plb_state *plb)
+{
+ struct net *net = sock_net(sk);
+ u32 pause;
+
+ if (!READ_ONCE(net->ipv4.sysctl_tcp_plb_enabled))
+ return;
+
+ pause = READ_ONCE(net->ipv4.sysctl_tcp_plb_suspend_rto_sec) * HZ;
+ pause += prandom_u32_max(pause);
+ plb->pause_until = tcp_jiffies32 + pause;
+
+ /* Reset PLB state upon RTO, since an RTO causes a sk_rethink_txhash() call
+ * that may switch this connection to a path with completely different
+ * congestion characteristics.
+ */
+ plb->consec_cong_rounds = 0;
+}
+EXPORT_SYMBOL_GPL(tcp_plb_update_state_upon_rto);
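
The pause_until handling in tcp_plb_check_rehash() has to survive tcp_jiffies32 wrapping. The following standalone sketch, not part of the patch, walks through the same two-condition check with HZ and the sysctl default filled in as assumptions:

#include <stdio.h>
#include <stdint.h>

static int time_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

int main(void)
{
	const uint32_t hz = 1000;		/* assumed tick rate for this sketch */
	const uint32_t suspend_sec = 60;	/* default tcp_plb_suspend_rto_sec */
	uint32_t max_suspend = 2 * suspend_sec * hz;
	uint32_t now = 5000;			/* stand-in for tcp_jiffies32 */
	uint32_t pause_until = 4990;		/* pause already expired */

	/* Clear the pause if it has ended, or if it lies implausibly far in
	 * the future, which means the jiffies counter wrapped since the RTO.
	 */
	if (pause_until &&
	    (!time_before(now, pause_until) ||
	     time_before(now + max_suspend, pause_until)))
		pause_until = 0;

	printf("pause_until=%u\n", pause_until);	/* prints 0 */
	return 0;
}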
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 6a320a614e54..b859d6c8298e 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -784,7 +784,8 @@ int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
if (tunnel) {
/* ...not for tunnels though: we don't have a sending socket */
if (udp_sk(sk)->encap_err_rcv)
- udp_sk(sk)->encap_err_rcv(sk, skb, iph->ihl << 2);
+ udp_sk(sk)->encap_err_rcv(sk, skb, err, uh->dest, info,
+ (u8 *)(uh+1));
goto out;
}
if (!inet->recverr) {
@@ -1448,7 +1449,7 @@ static void udp_rmem_release(struct sock *sk, int size, int partial,
if (likely(partial)) {
up->forward_deficit += size;
size = up->forward_deficit;
- if (size < (sk->sk_rcvbuf >> 2) &&
+ if (size < READ_ONCE(up->forward_threshold) &&
!skb_queue_empty(&up->reader_queue))
return;
} else {
@@ -1622,7 +1623,7 @@ static void udp_destruct_sock(struct sock *sk)
int udp_init_sock(struct sock *sk)
{
- skb_queue_head_init(&udp_sk(sk)->reader_queue);
+ udp_lib_init_sock(sk);
sk->sk_destruct = udp_destruct_sock;
set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
return 0;
@@ -2672,6 +2673,18 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
int err = 0;
int is_udplite = IS_UDPLITE(sk);
+ if (level == SOL_SOCKET) {
+ err = sk_setsockopt(sk, level, optname, optval, optlen);
+
+ if (optname == SO_RCVBUF || optname == SO_RCVBUFFORCE) {
+ sockopt_lock_sock(sk);
+ /* paired with READ_ONCE in udp_rmem_release() */
+ WRITE_ONCE(up->forward_threshold, sk->sk_rcvbuf >> 2);
+ sockopt_release_sock(sk);
+ }
+ return err;
+ }
+
if (optlen < sizeof(int))
return -EINVAL;
@@ -2785,7 +2798,7 @@ EXPORT_SYMBOL(udp_lib_setsockopt);
int udp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
unsigned int optlen)
{
- if (level == SOL_UDP || level == SOL_UDPLITE)
+ if (level == SOL_UDP || level == SOL_UDPLITE || level == SOL_SOCKET)
return udp_lib_setsockopt(sk, level, optname,
optval, optlen,
udp_push_pending_frames);
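
With SOL_SOCKET now routed through udp_lib_setsockopt(), a plain SO_RCVBUF change also refreshes the cached per-socket forward_threshold. A minimal userspace example of the call that exercises that path (standard POSIX, shown only for illustration):

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int rcvbuf = 1 << 20;	/* 1 MiB; the kernel doubles this internally */

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* After this call, udp_lib_setsockopt() recomputes forward_threshold
	 * as sk_rcvbuf >> 2, so the release threshold tracks the new buffer
	 * size instead of staying at the value cached at socket init.
	 */
	if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf, sizeof(rcvbuf)) < 0)
		perror("setsockopt");
	close(fd);
	return 0;
}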
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 024191004982..68075295d587 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -114,6 +114,7 @@ void inet6_sock_destruct(struct sock *sk)
inet6_cleanup_sock(sk);
inet_sock_destruct(sk);
}
+EXPORT_SYMBOL_GPL(inet6_sock_destruct);
static int inet6_create(struct net *net, struct socket *sock, int protocol,
int kern)
@@ -489,7 +490,7 @@ int inet6_release(struct socket *sock)
}
EXPORT_SYMBOL(inet6_release);
-void inet6_destroy_sock(struct sock *sk)
+void inet6_cleanup_sock(struct sock *sk)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct sk_buff *skb;
@@ -514,12 +515,6 @@ void inet6_destroy_sock(struct sock *sk)
txopt_put(opt);
}
}
-EXPORT_SYMBOL_GPL(inet6_destroy_sock);
-
-void inet6_cleanup_sock(struct sock *sk)
-{
- inet6_destroy_sock(sk);
-}
EXPORT_SYMBOL_GPL(inet6_cleanup_sock);
/*
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 5ecb56522f9d..7c7155b48f17 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -334,6 +334,7 @@ void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
if (sock_queue_err_skb(sk, skb))
kfree_skb(skb);
}
+EXPORT_SYMBOL_GPL(ipv6_icmp_error);
void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info)
{
@@ -771,7 +772,7 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
}
if (cmsg->cmsg_level == SOL_SOCKET) {
- err = __sock_cmsg_send(sk, msg, cmsg, &ipc6->sockc);
+ err = __sock_cmsg_send(sk, cmsg, &ipc6->sockc);
if (err)
return err;
continue;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index c035a96fba3a..7673e1dd1147 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -870,26 +870,6 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
return 0;
}
-/**
- * ip6gre_tnl_addr_conflict - compare packet addresses to tunnel's own
- * @t: the outgoing tunnel device
- * @hdr: IPv6 header from the incoming packet
- *
- * Description:
- * Avoid trivial tunneling loop by checking that tunnel exit-point
- * doesn't match source of incoming packet.
- *
- * Return:
- * 1 if conflict,
- * 0 else
- **/
-
-static inline bool ip6gre_tnl_addr_conflict(const struct ip6_tnl *t,
- const struct ipv6hdr *hdr)
-{
- return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
-}
-
static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 532f4478c884..9ce51680290b 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -1005,10 +1005,8 @@ unlock:
return retv;
e_inval:
- sockopt_release_sock(sk);
- if (needs_rtnl)
- rtnl_unlock();
- return -EINVAL;
+ retv = -EINVAL;
+ goto unlock;
}
int ipv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 38db0064d661..d13240f13607 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -253,7 +253,7 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
if (err) {
if (err == IPFRAG_DUP) {
/* No error for duplicates, pretend they got queued. */
- kfree_skb(skb);
+ kfree_skb_reason(skb, SKB_DROP_REASON_DUP_FRAG);
return -EINPROGRESS;
}
goto insert_error;
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 86c26e48d065..808983bc2ec9 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -23,11 +23,6 @@
#include <linux/bpf-cgroup.h>
#include <net/ping.h>
-static void ping_v6_destroy(struct sock *sk)
-{
- inet6_destroy_sock(sk);
-}
-
/* Compatibility glue so we can support IPv6 when it's compiled as a module */
static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
int *addr_len)
@@ -205,7 +200,6 @@ struct proto pingv6_prot = {
.owner = THIS_MODULE,
.init = ping_init_sock,
.close = ping_close,
- .destroy = ping_v6_destroy,
.pre_connect = ping_v6_pre_connect,
.connect = ip6_datagram_connect_v6_only,
.disconnect = __udp_disconnect,
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 722de9dd0ff7..a06a9f847db5 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1173,8 +1173,6 @@ static void raw6_destroy(struct sock *sk)
lock_sock(sk);
ip6_flush_pending_frames(sk);
release_sock(sk);
-
- inet6_destroy_sock(sk);
}
static int rawv6_init_sk(struct sock *sk)
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index ff866f2a879e..5bc8a28e67f9 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -112,10 +112,14 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
struct sk_buff *prev_tail;
struct net_device *dev;
int err = -ENOENT;
+ SKB_DR(reason);
u8 ecn;
- if (fq->q.flags & INET_FRAG_COMPLETE)
+ /* If reassembly is already done, @skb must be a duplicate frag. */
+ if (fq->q.flags & INET_FRAG_COMPLETE) {
+ SKB_DR_SET(reason, DUP_FRAG);
goto err;
+ }
err = -EINVAL;
offset = ntohs(fhdr->frag_off) & ~0x7;
@@ -226,8 +230,9 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
insert_error:
if (err == IPFRAG_DUP) {
- kfree_skb(skb);
- return -EINVAL;
+ SKB_DR_SET(reason, DUP_FRAG);
+ err = -EINVAL;
+ goto err;
}
err = -EINVAL;
__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
@@ -237,7 +242,7 @@ discard_fq:
__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_REASMFAILS);
err:
- kfree_skb(skb);
+ kfree_skb_reason(skb, reason);
return err;
}
diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
index 8370726ae7bf..487f8e98deaa 100644
--- a/net/ipv6/seg6_local.c
+++ b/net/ipv6/seg6_local.c
@@ -1644,13 +1644,13 @@ static int put_nla_counters(struct sk_buff *skb, struct seg6_local_lwt *slwt)
pcounters = per_cpu_ptr(slwt->pcpu_counters, i);
do {
- start = u64_stats_fetch_begin_irq(&pcounters->syncp);
+ start = u64_stats_fetch_begin(&pcounters->syncp);
packets = u64_stats_read(&pcounters->packets);
bytes = u64_stats_read(&pcounters->bytes);
errors = u64_stats_read(&pcounters->errors);
- } while (u64_stats_fetch_retry_irq(&pcounters->syncp, start));
+ } while (u64_stats_fetch_retry(&pcounters->syncp, start));
counters.packets += packets;
counters.bytes += bytes;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 2a3f9296df1e..f676be14e6b6 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1966,12 +1966,6 @@ static int tcp_v6_init_sock(struct sock *sk)
return 0;
}
-static void tcp_v6_destroy_sock(struct sock *sk)
-{
- tcp_v4_destroy_sock(sk);
- inet6_destroy_sock(sk);
-}
-
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
@@ -2164,7 +2158,7 @@ struct proto tcpv6_prot = {
.accept = inet_csk_accept,
.ioctl = tcp_ioctl,
.init = tcp_v6_init_sock,
- .destroy = tcp_v6_destroy_sock,
+ .destroy = tcp_v4_destroy_sock,
.shutdown = tcp_shutdown,
.setsockopt = tcp_setsockopt,
.getsockopt = tcp_getsockopt,
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index bc65e5b7195b..e2de3d906c82 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -64,7 +64,7 @@ static void udpv6_destruct_sock(struct sock *sk)
int udpv6_init_sock(struct sock *sk)
{
- skb_queue_head_init(&udp_sk(sk)->reader_queue);
+ udp_lib_init_sock(sk);
sk->sk_destruct = udpv6_destruct_sock;
set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
return 0;
@@ -632,7 +632,8 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
/* Tunnels don't have an application socket: don't pass errors back */
if (tunnel) {
if (udp_sk(sk)->encap_err_rcv)
- udp_sk(sk)->encap_err_rcv(sk, skb, offset);
+ udp_sk(sk)->encap_err_rcv(sk, skb, err, uh->dest,
+ ntohl(info), (u8 *)(uh+1));
goto out;
}
@@ -1639,6 +1640,7 @@ do_confirm:
err = 0;
goto out;
}
+EXPORT_SYMBOL(udpv6_sendmsg);
void udpv6_destroy_sock(struct sock *sk)
{
@@ -1662,8 +1664,6 @@ void udpv6_destroy_sock(struct sock *sk)
udp_encap_disable();
}
}
-
- inet6_destroy_sock(sk);
}
/*
@@ -1672,7 +1672,7 @@ void udpv6_destroy_sock(struct sock *sk)
int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
unsigned int optlen)
{
- if (level == SOL_UDP || level == SOL_UDPLITE)
+ if (level == SOL_UDP || level == SOL_UDPLITE || level == SOL_SOCKET)
return udp_lib_setsockopt(sk, level, optname,
optval, optlen,
udp_v6_push_pending_frames);
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 9dbd801ddb98..2478aa60145f 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -257,8 +257,6 @@ static void l2tp_ip6_destroy_sock(struct sock *sk)
if (tunnel)
l2tp_tunnel_delete(tunnel);
-
- inet6_destroy_sock(sk);
}
static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 9414d3bbd65f..c6fa53230450 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -183,34 +183,15 @@ static void ieee80211_add_addbaext(struct ieee80211_sub_if_data *sdata,
const struct ieee80211_addba_ext_ie *req,
u16 buf_size)
{
- struct ieee80211_supported_band *sband;
struct ieee80211_addba_ext_ie *resp;
- const struct ieee80211_sta_he_cap *he_cap;
- u8 frag_level, cap_frag_level;
u8 *pos;
- sband = ieee80211_get_sband(sdata);
- if (!sband)
- return;
- he_cap = ieee80211_get_he_iftype_cap(sband,
- ieee80211_vif_type_p2p(&sdata->vif));
- if (!he_cap)
- return;
-
pos = skb_put_zero(skb, 2 + sizeof(struct ieee80211_addba_ext_ie));
*pos++ = WLAN_EID_ADDBA_EXT;
*pos++ = sizeof(struct ieee80211_addba_ext_ie);
resp = (struct ieee80211_addba_ext_ie *)pos;
resp->data = req->data & IEEE80211_ADDBA_EXT_NO_FRAG;
- frag_level = u32_get_bits(req->data,
- IEEE80211_ADDBA_EXT_FRAG_LEVEL_MASK);
- cap_frag_level = u32_get_bits(he_cap->he_cap_elem.mac_cap_info[0],
- IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_MASK);
- if (frag_level > cap_frag_level)
- frag_level = cap_frag_level;
- resp->data |= u8_encode_bits(frag_level,
- IEEE80211_ADDBA_EXT_FRAG_LEVEL_MASK);
resp->data |= u8_encode_bits(buf_size >> IEEE80211_ADDBA_EXT_BUF_SIZE_SHIFT,
IEEE80211_ADDBA_EXT_BUF_SIZE_MASK);
}
@@ -242,7 +223,7 @@ static void ieee80211_send_addba_resp(struct sta_info *sta, u8 *da, u16 tid,
sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
else if (sdata->vif.type == NL80211_IFTYPE_STATION)
- memcpy(mgmt->bssid, sdata->deflink.u.mgd.bssid, ETH_ALEN);
+ memcpy(mgmt->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN);
else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);
@@ -297,9 +278,9 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
}
if (!sta->sta.deflink.ht_cap.ht_supported &&
- sta->sdata->vif.bss_conf.chandef.chan->band != NL80211_BAND_6GHZ) {
+ !sta->sta.deflink.he_cap.has_he) {
ht_dbg(sta->sdata,
- "STA %pM erroneously requests BA session on tid %d w/o QoS\n",
+ "STA %pM erroneously requests BA session on tid %d w/o HT\n",
sta->sta.addr, tid);
/* send a response anyway, it's an error case if we get here */
goto end;
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 07c892aa8c73..9c40f8d3bce8 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -82,7 +82,7 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
else if (sdata->vif.type == NL80211_IFTYPE_STATION)
- memcpy(mgmt->bssid, sdata->deflink.u.mgd.bssid, ETH_ALEN);
+ memcpy(mgmt->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN);
else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 687b4c878d4a..c848fe04dd44 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2554,47 +2554,50 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
struct bss_parameters *params)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_link_data *link;
struct ieee80211_supported_band *sband;
u32 changed = 0;
- if (!sdata_dereference(sdata->deflink.u.ap.beacon, sdata))
+ link = ieee80211_link_or_deflink(sdata, params->link_id, true);
+ if (IS_ERR(link))
+ return PTR_ERR(link);
+
+ if (!sdata_dereference(link->u.ap.beacon, sdata))
return -ENOENT;
- sband = ieee80211_get_sband(sdata);
+ sband = ieee80211_get_link_sband(link);
if (!sband)
return -EINVAL;
if (params->use_cts_prot >= 0) {
- sdata->vif.bss_conf.use_cts_prot = params->use_cts_prot;
+ link->conf->use_cts_prot = params->use_cts_prot;
changed |= BSS_CHANGED_ERP_CTS_PROT;
}
if (params->use_short_preamble >= 0) {
- sdata->vif.bss_conf.use_short_preamble =
- params->use_short_preamble;
+ link->conf->use_short_preamble = params->use_short_preamble;
changed |= BSS_CHANGED_ERP_PREAMBLE;
}
- if (!sdata->vif.bss_conf.use_short_slot &&
+ if (!link->conf->use_short_slot &&
(sband->band == NL80211_BAND_5GHZ ||
sband->band == NL80211_BAND_6GHZ)) {
- sdata->vif.bss_conf.use_short_slot = true;
+ link->conf->use_short_slot = true;
changed |= BSS_CHANGED_ERP_SLOT;
}
if (params->use_short_slot_time >= 0) {
- sdata->vif.bss_conf.use_short_slot =
- params->use_short_slot_time;
+ link->conf->use_short_slot = params->use_short_slot_time;
changed |= BSS_CHANGED_ERP_SLOT;
}
if (params->basic_rates) {
- ieee80211_parse_bitrates(sdata->vif.bss_conf.chandef.width,
+ ieee80211_parse_bitrates(link->conf->chandef.width,
wiphy->bands[sband->band],
params->basic_rates,
params->basic_rates_len,
- &sdata->vif.bss_conf.basic_rates);
+ &link->conf->basic_rates);
changed |= BSS_CHANGED_BASIC_RATES;
- ieee80211_check_rate_mask(&sdata->deflink);
+ ieee80211_check_rate_mask(link);
}
if (params->ap_isolate >= 0) {
@@ -2606,30 +2609,29 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
}
if (params->ht_opmode >= 0) {
- sdata->vif.bss_conf.ht_operation_mode =
- (u16) params->ht_opmode;
+ link->conf->ht_operation_mode = (u16)params->ht_opmode;
changed |= BSS_CHANGED_HT;
}
if (params->p2p_ctwindow >= 0) {
- sdata->vif.bss_conf.p2p_noa_attr.oppps_ctwindow &=
+ link->conf->p2p_noa_attr.oppps_ctwindow &=
~IEEE80211_P2P_OPPPS_CTWINDOW_MASK;
- sdata->vif.bss_conf.p2p_noa_attr.oppps_ctwindow |=
+ link->conf->p2p_noa_attr.oppps_ctwindow |=
params->p2p_ctwindow & IEEE80211_P2P_OPPPS_CTWINDOW_MASK;
changed |= BSS_CHANGED_P2P_PS;
}
if (params->p2p_opp_ps > 0) {
- sdata->vif.bss_conf.p2p_noa_attr.oppps_ctwindow |=
+ link->conf->p2p_noa_attr.oppps_ctwindow |=
IEEE80211_P2P_OPPPS_ENABLE_BIT;
changed |= BSS_CHANGED_P2P_PS;
} else if (params->p2p_opp_ps == 0) {
- sdata->vif.bss_conf.p2p_noa_attr.oppps_ctwindow &=
+ link->conf->p2p_noa_attr.oppps_ctwindow &=
~IEEE80211_P2P_OPPPS_ENABLE_BIT;
changed |= BSS_CHANGED_P2P_PS;
}
- ieee80211_link_info_change_notify(sdata, &sdata->deflink, changed);
+ ieee80211_link_info_change_notify(sdata, link, changed);
return 0;
}
@@ -4338,9 +4340,6 @@ static int ieee80211_get_txq_stats(struct wiphy *wiphy,
struct ieee80211_sub_if_data *sdata;
int ret = 0;
- if (!local->ops->wake_tx_queue)
- return 1;
-
spin_lock_bh(&local->fq.lock);
rcu_read_lock();
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 78c7d60e8667..dfb9f55e2685 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -663,9 +663,7 @@ void debugfs_hw_add(struct ieee80211_local *local)
DEBUGFS_ADD_MODE(force_tx_status, 0600);
DEBUGFS_ADD_MODE(aql_enable, 0600);
DEBUGFS_ADD(aql_pending);
-
- if (local->ops->wake_tx_queue)
- DEBUGFS_ADD_MODE(aqm, 0600);
+ DEBUGFS_ADD_MODE(aqm, 0600);
DEBUGFS_ADD_MODE(airtime_flags, 0600);
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 5b014786fd2d..c87e1137e5da 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -677,8 +677,7 @@ static void add_common_files(struct ieee80211_sub_if_data *sdata)
DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_5ghz);
DEBUGFS_ADD(hw_queues);
- if (sdata->local->ops->wake_tx_queue &&
- sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
+ if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
sdata->vif.type != NL80211_IFTYPE_NAN)
DEBUGFS_ADD(aqm);
}
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index d3397c1248d3..7a3d7893e19d 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -5,7 +5,7 @@
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2021 Intel Corporation
+ * Copyright (C) 2018 - 2022 Intel Corporation
*/
#include <linux/debugfs.h>
@@ -435,8 +435,29 @@ static ssize_t sta_agg_status_write(struct file *file, const char __user *userbu
}
STA_OPS_RW(agg_status);
-static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
+/* link sta attributes */
+#define LINK_STA_OPS(name) \
+static const struct file_operations link_sta_ ##name## _ops = { \
+ .read = link_sta_##name##_read, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+}
+
+static ssize_t link_sta_addr_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct link_sta_info *link_sta = file->private_data;
+ u8 mac[3 * ETH_ALEN + 1];
+
+ snprintf(mac, sizeof(mac), "%pM\n", link_sta->pub->addr);
+
+ return simple_read_from_buffer(userbuf, count, ppos, mac, 3 * ETH_ALEN);
+}
+
+LINK_STA_OPS(addr);
+
+static ssize_t link_sta_ht_capa_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
{
#define PRINT_HT_CAP(_cond, _str) \
do { \
@@ -446,8 +467,8 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
char *buf, *p;
int i;
ssize_t bufsz = 512;
- struct sta_info *sta = file->private_data;
- struct ieee80211_sta_ht_cap *htc = &sta->sta.deflink.ht_cap;
+ struct link_sta_info *link_sta = file->private_data;
+ struct ieee80211_sta_ht_cap *htc = &link_sta->pub->ht_cap;
ssize_t ret;
buf = kzalloc(bufsz, GFP_KERNEL);
@@ -524,14 +545,14 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
kfree(buf);
return ret;
}
-STA_OPS(ht_capa);
+LINK_STA_OPS(ht_capa);
-static ssize_t sta_vht_capa_read(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
+static ssize_t link_sta_vht_capa_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
{
char *buf, *p;
- struct sta_info *sta = file->private_data;
- struct ieee80211_sta_vht_cap *vhtc = &sta->sta.deflink.vht_cap;
+ struct link_sta_info *link_sta = file->private_data;
+ struct ieee80211_sta_vht_cap *vhtc = &link_sta->pub->vht_cap;
ssize_t ret;
ssize_t bufsz = 512;
@@ -638,15 +659,15 @@ static ssize_t sta_vht_capa_read(struct file *file, char __user *userbuf,
kfree(buf);
return ret;
}
-STA_OPS(vht_capa);
+LINK_STA_OPS(vht_capa);
-static ssize_t sta_he_capa_read(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
+static ssize_t link_sta_he_capa_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
{
char *buf, *p;
size_t buf_sz = PAGE_SIZE;
- struct sta_info *sta = file->private_data;
- struct ieee80211_sta_he_cap *hec = &sta->sta.deflink.he_cap;
+ struct link_sta_info *link_sta = file->private_data;
+ struct ieee80211_sta_he_cap *hec = &link_sta->pub->he_cap;
struct ieee80211_he_mcs_nss_supp *nss = &hec->he_mcs_nss_supp;
u8 ppe_size;
u8 *cap;
@@ -1011,7 +1032,7 @@ out:
kfree(buf);
return ret;
}
-STA_OPS(he_capa);
+LINK_STA_OPS(he_capa);
#define DEBUGFS_ADD(name) \
debugfs_create_file(#name, 0400, \
@@ -1048,18 +1069,11 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
DEBUGFS_ADD(num_ps_buf_frames);
DEBUGFS_ADD(last_seq_ctrl);
DEBUGFS_ADD(agg_status);
- DEBUGFS_ADD(ht_capa);
- DEBUGFS_ADD(vht_capa);
- DEBUGFS_ADD(he_capa);
-
- DEBUGFS_ADD_COUNTER(rx_duplicates, deflink.rx_stats.num_duplicates);
- DEBUGFS_ADD_COUNTER(rx_fragments, deflink.rx_stats.fragments);
+ /* FIXME: Kept here as the statistics are only done on the deflink */
DEBUGFS_ADD_COUNTER(tx_filtered, deflink.status_stats.filtered);
- if (local->ops->wake_tx_queue) {
- DEBUGFS_ADD(aqm);
- DEBUGFS_ADD(airtime);
- }
+ DEBUGFS_ADD(aqm);
+ DEBUGFS_ADD(airtime);
if (wiphy_ext_feature_isset(local->hw.wiphy,
NL80211_EXT_FEATURE_AQL))
@@ -1076,3 +1090,85 @@ void ieee80211_sta_debugfs_remove(struct sta_info *sta)
debugfs_remove_recursive(sta->debugfs_dir);
sta->debugfs_dir = NULL;
}
+
+#undef DEBUGFS_ADD
+#undef DEBUGFS_ADD_COUNTER
+
+#define DEBUGFS_ADD(name) \
+ debugfs_create_file(#name, 0400, \
+ link_sta->debugfs_dir, link_sta, &link_sta_ ##name## _ops)
+#define DEBUGFS_ADD_COUNTER(name, field) \
+ debugfs_create_ulong(#name, 0400, link_sta->debugfs_dir, &link_sta->field)
+
+void ieee80211_link_sta_debugfs_add(struct link_sta_info *link_sta)
+{
+ if (WARN_ON(!link_sta->sta->debugfs_dir))
+ return;
+
+ /* For non-MLO, leave the files in the main directory. */
+ if (link_sta->sta->sta.valid_links) {
+ char link_dir_name[10];
+
+ snprintf(link_dir_name, sizeof(link_dir_name),
+ "link-%d", link_sta->link_id);
+
+ link_sta->debugfs_dir =
+ debugfs_create_dir(link_dir_name,
+ link_sta->sta->debugfs_dir);
+
+ DEBUGFS_ADD(addr);
+ } else {
+ if (WARN_ON(link_sta != &link_sta->sta->deflink))
+ return;
+
+ link_sta->debugfs_dir = link_sta->sta->debugfs_dir;
+ }
+
+ DEBUGFS_ADD(ht_capa);
+ DEBUGFS_ADD(vht_capa);
+ DEBUGFS_ADD(he_capa);
+
+ DEBUGFS_ADD_COUNTER(rx_duplicates, rx_stats.num_duplicates);
+ DEBUGFS_ADD_COUNTER(rx_fragments, rx_stats.fragments);
+}
+
+void ieee80211_link_sta_debugfs_remove(struct link_sta_info *link_sta)
+{
+ if (!link_sta->debugfs_dir || !link_sta->sta->debugfs_dir) {
+ link_sta->debugfs_dir = NULL;
+ return;
+ }
+
+ if (link_sta->debugfs_dir == link_sta->sta->debugfs_dir) {
+ WARN_ON(link_sta != &link_sta->sta->deflink);
+ link_sta->sta->debugfs_dir = NULL;
+ return;
+ }
+
+ debugfs_remove_recursive(link_sta->debugfs_dir);
+ link_sta->debugfs_dir = NULL;
+}
+
+void ieee80211_link_sta_debugfs_drv_add(struct link_sta_info *link_sta)
+{
+ if (WARN_ON(!link_sta->debugfs_dir))
+ return;
+
+ drv_link_sta_add_debugfs(link_sta->sta->local, link_sta->sta->sdata,
+ link_sta->pub, link_sta->debugfs_dir);
+}
+
+void ieee80211_link_sta_debugfs_drv_remove(struct link_sta_info *link_sta)
+{
+ if (!link_sta->debugfs_dir)
+ return;
+
+ if (WARN_ON(link_sta->debugfs_dir == link_sta->sta->debugfs_dir))
+ return;
+
+ /* Recreate the directory excluding the driver data */
+ debugfs_remove_recursive(link_sta->debugfs_dir);
+ link_sta->debugfs_dir = NULL;
+
+ ieee80211_link_sta_debugfs_add(link_sta);
+}
diff --git a/net/mac80211/debugfs_sta.h b/net/mac80211/debugfs_sta.h
index d2e7c27ad6d1..cde8148bdb18 100644
--- a/net/mac80211/debugfs_sta.h
+++ b/net/mac80211/debugfs_sta.h
@@ -7,9 +7,21 @@
#ifdef CONFIG_MAC80211_DEBUGFS
void ieee80211_sta_debugfs_add(struct sta_info *sta);
void ieee80211_sta_debugfs_remove(struct sta_info *sta);
+
+void ieee80211_link_sta_debugfs_add(struct link_sta_info *link_sta);
+void ieee80211_link_sta_debugfs_remove(struct link_sta_info *link_sta);
+
+void ieee80211_link_sta_debugfs_drv_add(struct link_sta_info *link_sta);
+void ieee80211_link_sta_debugfs_drv_remove(struct link_sta_info *link_sta);
#else
static inline void ieee80211_sta_debugfs_add(struct sta_info *sta) {}
static inline void ieee80211_sta_debugfs_remove(struct sta_info *sta) {}
+
+static inline void ieee80211_link_sta_debugfs_add(struct link_sta_info *link_sta) {}
+static inline void ieee80211_link_sta_debugfs_remove(struct link_sta_info *link_sta) {}
+
+static inline void ieee80211_link_sta_debugfs_drv_add(struct link_sta_info *link_sta) {}
+static inline void ieee80211_link_sta_debugfs_drv_remove(struct link_sta_info *link_sta) {}
#endif
#endif /* __MAC80211_DEBUGFS_STA_H */
diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
index 5392ffa18270..d737db4e07e2 100644
--- a/net/mac80211/driver-ops.c
+++ b/net/mac80211/driver-ops.c
@@ -7,6 +7,7 @@
#include "ieee80211_i.h"
#include "trace.h"
#include "driver-ops.h"
+#include "debugfs_sta.h"
int drv_start(struct ieee80211_local *local)
{
@@ -497,6 +498,11 @@ int drv_change_sta_links(struct ieee80211_local *local,
struct ieee80211_sta *sta,
u16 old_links, u16 new_links)
{
+ struct sta_info *info = container_of(sta, struct sta_info, sta);
+ struct link_sta_info *link_sta;
+ unsigned long links_to_add;
+ unsigned long links_to_rem;
+ unsigned int link_id;
int ret = -EOPNOTSUPP;
might_sleep();
@@ -510,11 +516,30 @@ int drv_change_sta_links(struct ieee80211_local *local,
if (old_links == new_links)
return 0;
+ links_to_add = ~old_links & new_links;
+ links_to_rem = old_links & ~new_links;
+
+ for_each_set_bit(link_id, &links_to_rem, IEEE80211_MLD_MAX_NUM_LINKS) {
+ link_sta = rcu_dereference_protected(info->link[link_id],
+ lockdep_is_held(&local->sta_mtx));
+
+ ieee80211_link_sta_debugfs_drv_remove(link_sta);
+ }
+
trace_drv_change_sta_links(local, sdata, sta, old_links, new_links);
if (local->ops->change_sta_links)
ret = local->ops->change_sta_links(&local->hw, &sdata->vif, sta,
old_links, new_links);
trace_drv_return_int(local, ret);
- return ret;
+ if (ret)
+ return ret;
+
+ for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) {
+ link_sta = rcu_dereference_protected(info->link[link_id],
+ lockdep_is_held(&local->sta_mtx));
+ ieee80211_link_sta_debugfs_drv_add(link_sta);
+ }
+
+ return 0;
}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 81e40b0a3b16..809bad53e15b 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -480,6 +480,22 @@ static inline void drv_sta_add_debugfs(struct ieee80211_local *local,
local->ops->sta_add_debugfs(&local->hw, &sdata->vif,
sta, dir);
}
+
+static inline void drv_link_sta_add_debugfs(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_link_sta *link_sta,
+ struct dentry *dir)
+{
+ might_sleep();
+
+ sdata = get_bss_sdata(sdata);
+ if (!check_sdata_in_driver(sdata))
+ return;
+
+ if (local->ops->link_sta_add_debugfs)
+ local->ops->link_sta_add_debugfs(&local->hw, &sdata->vif,
+ link_sta, dir);
+}
#endif
static inline void drv_sta_pre_rcu_remove(struct ieee80211_local *local,
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index a842f2e1c230..63ff0d2524b6 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -390,6 +390,7 @@ struct ieee80211_mgd_auth_data {
bool done, waiting;
bool peer_confirmed;
bool timeout_started;
+ int link_id;
u8 ap_addr[ETH_ALEN] __aligned(2);
@@ -412,6 +413,8 @@ struct ieee80211_mgd_assoc_data {
u8 *elems; /* pointing to inside ie[] below */
ieee80211_conn_flags_t conn_flags;
+
+ u16 status;
} link[IEEE80211_MLD_MAX_NUM_LINKS];
u8 ap_addr[ETH_ALEN] __aligned(2);
@@ -1707,6 +1710,17 @@ struct ieee802_11_elems {
u8 tx_pwr_env_num;
u8 eht_cap_len;
+ /* multi-link element can be de-fragmented and thus u8 is not sufficient */
+ size_t multi_link_len;
+
+ /*
+ * store the per station profile pointer and length in case that the
+ * parsing also handled Multi-Link element parsing for a specific link
+ * ID.
+ */
+ struct ieee80211_mle_per_sta_profile *prof;
+ size_t sta_prof_len;
+
/* whether a parse error occurred while retrieving these elements */
bool parse_error;
@@ -2205,9 +2219,13 @@ static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata,
* represent a non-transmitting BSS in which case the data
* for that non-transmitting BSS is returned
* @link_id: the link ID to parse elements for, if a STA profile
- * is present in the multi-link element, or -1 to ignore
+ * is present in the multi-link element, or -1 to ignore;
+ * note that the code currently assumes parsing an association
+ * (or re-association) response frame if this is given
* @from_ap: frame is received from an AP (currently used only
* for EHT capabilities parsing)
+ * @scratch_len: if non-zero, specifies the requested length of the scratch
+ * buffer; otherwise, 'len' is used.
*/
struct ieee80211_elems_parse_params {
const u8 *start;
@@ -2218,6 +2236,7 @@ struct ieee80211_elems_parse_params {
struct cfg80211_bss *bss;
int link_id;
bool from_ap;
+ size_t scratch_len;
};
struct ieee802_11_elems *
@@ -2288,7 +2307,6 @@ void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
enum queue_stop_reason reason,
bool refcounted);
-void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue);
void ieee80211_add_pending_skb(struct ieee80211_local *local,
struct sk_buff *skb);
void ieee80211_add_pending_skbs(struct ieee80211_local *local,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index dd9ac1f7d2ea..7c4ce716c939 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -458,12 +458,6 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
if (cancel_scan)
ieee80211_scan_cancel(local);
- /*
- * Stop TX on this interface first.
- */
- if (!local->ops->wake_tx_queue && sdata->dev)
- netif_tx_stop_all_queues(sdata->dev);
-
ieee80211_roc_purge(local, sdata);
switch (sdata->vif.type) {
@@ -811,13 +805,6 @@ static void ieee80211_uninit(struct net_device *dev)
ieee80211_teardown_sdata(IEEE80211_DEV_TO_SUB_IF(dev));
}
-static u16 ieee80211_netdev_select_queue(struct net_device *dev,
- struct sk_buff *skb,
- struct net_device *sb_dev)
-{
- return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
-}
-
static void
ieee80211_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
@@ -831,7 +818,6 @@ static const struct net_device_ops ieee80211_dataif_ops = {
.ndo_start_xmit = ieee80211_subif_start_xmit,
.ndo_set_rx_mode = ieee80211_set_multicast_list,
.ndo_set_mac_address = ieee80211_change_mac,
- .ndo_select_queue = ieee80211_netdev_select_queue,
.ndo_get_stats64 = ieee80211_get_stats64,
};
@@ -939,7 +925,6 @@ static const struct net_device_ops ieee80211_dataif_8023_ops = {
.ndo_start_xmit = ieee80211_subif_start_xmit_8023,
.ndo_set_rx_mode = ieee80211_set_multicast_list,
.ndo_set_mac_address = ieee80211_change_mac,
- .ndo_select_queue = ieee80211_netdev_select_queue,
.ndo_get_stats64 = ieee80211_get_stats64,
.ndo_fill_forward_path = ieee80211_netdev_fill_forward_path,
};
@@ -1441,35 +1426,6 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
ieee80211_recalc_ps(local);
- if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
- sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
- local->ops->wake_tx_queue) {
- /* XXX: for AP_VLAN, actually track AP queues */
- if (dev)
- netif_tx_start_all_queues(dev);
- } else if (dev) {
- unsigned long flags;
- int n_acs = IEEE80211_NUM_ACS;
- int ac;
-
- if (local->hw.queues < IEEE80211_NUM_ACS)
- n_acs = 1;
-
- spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
- if (sdata->vif.cab_queue == IEEE80211_INVAL_HW_QUEUE ||
- (local->queue_stop_reasons[sdata->vif.cab_queue] == 0 &&
- skb_queue_empty(&local->pending[sdata->vif.cab_queue]))) {
- for (ac = 0; ac < n_acs; ac++) {
- int ac_queue = sdata->vif.hw_queue[ac];
-
- if (local->queue_stop_reasons[ac_queue] == 0 &&
- skb_queue_empty(&local->pending[ac_queue]))
- netif_start_subqueue(dev, ac);
- }
- }
- spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
- }
-
set_bit(SDATA_STATE_RUNNING, &sdata->state);
return 0;
@@ -1499,17 +1455,12 @@ static void ieee80211_if_setup(struct net_device *dev)
{
ether_setup(dev);
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->priv_flags |= IFF_NO_QUEUE;
dev->netdev_ops = &ieee80211_dataif_ops;
dev->needs_free_netdev = true;
dev->priv_destructor = ieee80211_if_free;
}
-static void ieee80211_if_setup_no_queue(struct net_device *dev)
-{
- ieee80211_if_setup(dev);
- dev->priv_flags |= IFF_NO_QUEUE;
-}
-
static void ieee80211_iface_process_skb(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
@@ -2094,9 +2045,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
struct net_device *ndev = NULL;
struct ieee80211_sub_if_data *sdata = NULL;
struct txq_info *txqi;
- void (*if_setup)(struct net_device *dev);
int ret, i;
- int txqs = 1;
ASSERT_RTNL();
@@ -2119,30 +2068,18 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
sizeof(void *));
int txq_size = 0;
- if (local->ops->wake_tx_queue &&
- type != NL80211_IFTYPE_AP_VLAN &&
+ if (type != NL80211_IFTYPE_AP_VLAN &&
(type != NL80211_IFTYPE_MONITOR ||
(params->flags & MONITOR_FLAG_ACTIVE)))
txq_size += sizeof(struct txq_info) +
local->hw.txq_data_size;
- if (local->ops->wake_tx_queue) {
- if_setup = ieee80211_if_setup_no_queue;
- } else {
- if_setup = ieee80211_if_setup;
- if (local->hw.queues >= IEEE80211_NUM_ACS)
- txqs = IEEE80211_NUM_ACS;
- }
-
ndev = alloc_netdev_mqs(size + txq_size,
name, name_assign_type,
- if_setup, txqs, 1);
+ ieee80211_if_setup, 1, 1);
if (!ndev)
return -ENOMEM;
- if (!local->ops->wake_tx_queue && local->hw.wiphy->tx_queue_len)
- ndev->tx_queue_len = local->hw.wiphy->tx_queue_len;
-
dev_net_set(ndev, wiphy_net(local->hw.wiphy));
ndev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
diff --git a/net/mac80211/link.c b/net/mac80211/link.c
index e309708abae8..d1f5a9f7c647 100644
--- a/net/mac80211/link.c
+++ b/net/mac80211/link.c
@@ -357,6 +357,11 @@ static int _ieee80211_set_active_links(struct ieee80211_sub_if_data *sdata,
list_for_each_entry(sta, &local->sta_list, list) {
if (sdata != sta->sdata)
continue;
+
+ /* this is very temporary, but do it anyway */
+ __ieee80211_sta_recalc_aggregates(sta,
+ old_active | active_links);
+
ret = drv_change_sta_links(local, sdata, &sta->sta,
old_active,
old_active | active_links);
@@ -369,10 +374,22 @@ static int _ieee80211_set_active_links(struct ieee80211_sub_if_data *sdata,
list_for_each_entry(sta, &local->sta_list, list) {
if (sdata != sta->sdata)
continue;
+
+ __ieee80211_sta_recalc_aggregates(sta, active_links);
+
ret = drv_change_sta_links(local, sdata, &sta->sta,
old_active | active_links,
active_links);
WARN_ON_ONCE(ret);
+
+ /*
+ * Do it again, just in case - the driver might very
+ * well have called ieee80211_sta_recalc_aggregates()
+ * from there when filling in the new links, which
+ * would set it wrong since the vif's active links are
+ * not switched yet...
+ */
+ __ieee80211_sta_recalc_aggregates(sta, active_links);
}
for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) {
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 02b5abc7326b..846528850612 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -630,7 +630,7 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
if (WARN_ON(!ops->tx || !ops->start || !ops->stop || !ops->config ||
!ops->add_interface || !ops->remove_interface ||
- !ops->configure_filter))
+ !ops->configure_filter || !ops->wake_tx_queue))
return NULL;
if (WARN_ON(ops->sta_state && (ops->sta_add || ops->sta_remove)))
@@ -719,9 +719,7 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
if (!ops->set_key)
wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
- if (ops->wake_tx_queue)
- wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_TXQS);
-
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_TXQS);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_RRM);
wiphy->bss_priv_size = sizeof(struct ieee80211_bss);
@@ -834,10 +832,7 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
atomic_set(&local->agg_queue_stop[i], 0);
}
tasklet_setup(&local->tx_pending_tasklet, ieee80211_tx_pending);
-
- if (ops->wake_tx_queue)
- tasklet_setup(&local->wake_txqs_tasklet, ieee80211_wake_txqs);
-
+ tasklet_setup(&local->wake_txqs_tasklet, ieee80211_wake_txqs);
tasklet_setup(&local->tasklet, ieee80211_tasklet_handler);
skb_queue_head_init(&local->skb_queue);
@@ -1087,6 +1082,16 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
channels += sband->n_channels;
+ /*
+ * Due to the way the aggregation code handles this and it
+ * being an HT capability, we can't really support delayed
+ * BA in MLO (yet).
+ */
+ if (WARN_ON(sband->ht_cap.ht_supported &&
+ (sband->ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA) &&
+ hw->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO))
+ return -EINVAL;
+
if (max_bitrates < sband->n_bitrates)
max_bitrates = sband->n_bitrates;
supp_ht = supp_ht || sband->ht_cap.ht_supported;
@@ -1155,6 +1160,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (!local->int_scan_req)
return -ENOMEM;
+ eth_broadcast_addr(local->int_scan_req->bssid);
+
for (band = 0; band < NUM_NL80211_BANDS; band++) {
if (!local->hw.wiphy->bands[band])
continue;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index d8484cd870de..a804e0220ed7 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2717,18 +2717,10 @@ static u32 ieee80211_link_set_associated(struct ieee80211_link_data *link,
}
if (link->u.mgd.have_beacon) {
- /*
- * If the AP is buggy we may get here with no DTIM period
- * known, so assume it's 1 which is the only safe assumption
- * in that case, although if the TIM IE is broken powersave
- * probably just won't work at all.
- */
- bss_conf->dtim_period = link->u.mgd.dtim_period ?: 1;
bss_conf->beacon_rate = bss->beacon_rate;
changed |= BSS_CHANGED_BEACON_INFO;
} else {
bss_conf->beacon_rate = NULL;
- bss_conf->dtim_period = 0;
}
/* Tell the driver to monitor connection quality (if supported) */
@@ -2754,7 +2746,8 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
struct cfg80211_bss *cbss = assoc_data->link[link_id].bss;
struct ieee80211_link_data *link;
- if (!cbss)
+ if (!cbss ||
+ assoc_data->link[link_id].status != WLAN_STATUS_SUCCESS)
continue;
link = sdata_dereference(sdata->link[link_id], sdata);
@@ -2782,7 +2775,8 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
struct ieee80211_link_data *link;
struct cfg80211_bss *cbss = assoc_data->link[link_id].bss;
- if (!cbss)
+ if (!cbss ||
+ assoc_data->link[link_id].status != WLAN_STATUS_SUCCESS)
continue;
link = sdata_dereference(sdata->link[link_id], sdata);
@@ -3868,9 +3862,15 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
}
}
-static bool ieee80211_twt_req_supported(const struct link_sta_info *link_sta,
+static bool ieee80211_twt_req_supported(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_supported_band *sband,
+ const struct link_sta_info *link_sta,
const struct ieee802_11_elems *elems)
{
+ const struct ieee80211_sta_he_cap *own_he_cap =
+ ieee80211_get_he_iftype_cap(sband,
+ ieee80211_vif_type_p2p(&sdata->vif));
+
if (elems->ext_capab_len < 10)
return false;
@@ -3878,14 +3878,19 @@ static bool ieee80211_twt_req_supported(const struct link_sta_info *link_sta,
return false;
return link_sta->pub->he_cap.he_cap_elem.mac_cap_info[0] &
- IEEE80211_HE_MAC_CAP0_TWT_RES;
+ IEEE80211_HE_MAC_CAP0_TWT_RES &&
+ own_he_cap &&
+ (own_he_cap->he_cap_elem.mac_cap_info[0] &
+ IEEE80211_HE_MAC_CAP0_TWT_REQ);
}
-static int ieee80211_recalc_twt_req(struct ieee80211_link_data *link,
+static int ieee80211_recalc_twt_req(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_supported_band *sband,
+ struct ieee80211_link_data *link,
struct link_sta_info *link_sta,
struct ieee802_11_elems *elems)
{
- bool twt = ieee80211_twt_req_supported(link_sta, elems);
+ bool twt = ieee80211_twt_req_supported(sdata, sband, link_sta, elems);
if (link->conf->twt_requester != twt) {
link->conf->twt_requester = twt;
@@ -3923,11 +3928,12 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data;
struct ieee80211_bss_conf *bss_conf = link->conf;
struct ieee80211_local *local = sdata->local;
+ unsigned int link_id = link->link_id;
struct ieee80211_elems_parse_params parse_params = {
.start = elem_start,
.len = elem_len,
.bss = cbss,
- .link_id = link == &sdata->deflink ? -1 : link->link_id,
+ .link_id = link_id == assoc_data->assoc_link_id ? -1 : link_id,
.from_ap = true,
};
bool is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ;
@@ -3942,8 +3948,35 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
if (!elems)
return false;
- /* FIXME: use from STA profile element after parsing that */
- capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
+ if (link_id == assoc_data->assoc_link_id) {
+ capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
+
+ /*
+ * we should not get to this flow unless the association was
+ * successful, so set the status directly to success
+ */
+ assoc_data->link[link_id].status = WLAN_STATUS_SUCCESS;
+ } else if (!elems->prof) {
+ ret = false;
+ goto out;
+ } else {
+ const u8 *ptr = elems->prof->variable +
+ elems->prof->sta_info_len - 1;
+
+ /*
+ * During parsing, we validated that these fields exist,
+ * otherwise elems->prof would have been set to NULL.
+ */
+ capab_info = get_unaligned_le16(ptr);
+ assoc_data->link[link_id].status = get_unaligned_le16(ptr + 2);
+
+ if (assoc_data->link[link_id].status != WLAN_STATUS_SUCCESS) {
+ link_info(link, "association response status code=%u\n",
+ assoc_data->link[link_id].status);
+ ret = true;
+ goto out;
+ }
+ }
if (!is_s1g && !elems->supp_rates) {
sdata_info(sdata, "no SuppRates element in AssocResp\n");
@@ -4099,7 +4132,8 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
else
bss_conf->twt_protected = false;
- *changed |= ieee80211_recalc_twt_req(link, link_sta, elems);
+ *changed |= ieee80211_recalc_twt_req(sdata, sband, link,
+ link_sta, elems);
if (elems->eht_operation && elems->eht_cap &&
!(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_EHT)) {
@@ -4864,6 +4898,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
unsigned int link_id;
struct sta_info *sta;
u64 changed[IEEE80211_MLD_MAX_NUM_LINKS] = {};
+ u16 valid_links = 0;
int err;
mutex_lock(&sdata->local->sta_mtx);
@@ -4876,8 +4911,6 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
goto out_err;
if (sdata->vif.valid_links) {
- u16 valid_links = 0;
-
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
if (!assoc_data->link[link_id].bss)
continue;
@@ -4894,10 +4927,11 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
}
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
+ struct cfg80211_bss *cbss = assoc_data->link[link_id].bss;
struct ieee80211_link_data *link;
struct link_sta_info *link_sta;
- if (!assoc_data->link[link_id].bss)
+ if (!cbss)
continue;
link = sdata_dereference(sdata->link[link_id], sdata);
@@ -4906,28 +4940,36 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
if (sdata->vif.valid_links)
link_info(link,
- "local address %pM, AP link address %pM\n",
+ "local address %pM, AP link address %pM%s\n",
link->conf->addr,
- assoc_data->link[link_id].bss->bssid);
+ assoc_data->link[link_id].bss->bssid,
+ link_id == assoc_data->assoc_link_id ?
+ " (assoc)" : "");
link_sta = rcu_dereference_protected(sta->link[link_id],
lockdep_is_held(&local->sta_mtx));
if (WARN_ON(!link_sta))
goto out_err;
- if (link_id != assoc_data->assoc_link_id) {
- struct cfg80211_bss *cbss = assoc_data->link[link_id].bss;
+ if (!link->u.mgd.have_beacon) {
const struct cfg80211_bss_ies *ies;
rcu_read_lock();
- ies = rcu_dereference(cbss->ies);
+ ies = rcu_dereference(cbss->beacon_ies);
+ if (ies)
+ link->u.mgd.have_beacon = true;
+ else
+ ies = rcu_dereference(cbss->ies);
ieee80211_get_dtim(ies,
&link->conf->sync_dtim_count,
&link->u.mgd.dtim_period);
- link->conf->dtim_period = link->u.mgd.dtim_period ?: 1;
link->conf->beacon_int = cbss->beacon_interval;
rcu_read_unlock();
+ }
+
+ link->conf->dtim_period = link->u.mgd.dtim_period ?: 1;
+ if (link_id != assoc_data->assoc_link_id) {
err = ieee80211_prep_channel(sdata, link, cbss,
&link->u.mgd.conn_flags);
if (err) {
@@ -4947,6 +4989,12 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
&changed[link_id]))
goto out_err;
+ if (assoc_data->link[link_id].status != WLAN_STATUS_SUCCESS) {
+ valid_links &= ~BIT(link_id);
+ ieee80211_sta_remove_link(sta, link_id);
+ continue;
+ }
+
if (link_id != assoc_data->assoc_link_id) {
err = ieee80211_sta_activate_link(sta, link_id);
if (err)
@@ -4954,6 +5002,9 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
}
}
+ /* links might have changed due to rejected ones, set them again */
+ ieee80211_vif_set_links(sdata, valid_links);
+
rate_control_rate_init(sta);
if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED) {
@@ -5033,6 +5084,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
struct cfg80211_rx_assoc_resp resp = {
.uapsd_queues = -1,
};
+ u8 ap_mld_addr[ETH_ALEN] __aligned(2);
unsigned int link_id;
sdata_assert_lock(sdata);
@@ -5187,10 +5239,13 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
link = sdata_dereference(sdata->link[link_id], sdata);
if (!link)
continue;
+
if (!assoc_data->link[link_id].bss)
continue;
+
resp.links[link_id].bss = assoc_data->link[link_id].bss;
resp.links[link_id].addr = link->conf->addr;
+ resp.links[link_id].status = assoc_data->link[link_id].status;
/* get uapsd queues configuration - same for all links */
resp.uapsd_queues = 0;
@@ -5199,6 +5254,11 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
resp.uapsd_queues |= ieee80211_ac_to_qos_mask[ac];
}
+ if (sdata->vif.valid_links) {
+ ether_addr_copy(ap_mld_addr, sdata->vif.cfg.ap_addr);
+ resp.ap_mld_addr = ap_mld_addr;
+ }
+
ieee80211_destroy_assoc_data(sdata,
status_code == WLAN_STATUS_SUCCESS ?
ASSOC_SUCCESS :
@@ -5208,8 +5268,6 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
resp.len = len;
resp.req_ies = ifmgd->assoc_req_ies;
resp.req_ies_len = ifmgd->assoc_req_ies_len;
- if (sdata->vif.valid_links)
- resp.ap_mld_addr = sdata->vif.cfg.ap_addr;
cfg80211_rx_assoc_resp(sdata->dev, &resp);
notify_driver:
drv_mgd_complete_tx(sdata->local, sdata, &info);
@@ -5432,6 +5490,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
struct ieee802_11_elems *elems;
struct ieee80211_local *local = sdata->local;
struct ieee80211_chanctx_conf *chanctx_conf;
+ struct ieee80211_supported_band *sband;
struct ieee80211_channel *chan;
struct link_sta_info *link_sta;
struct sta_info *sta;
@@ -5694,7 +5753,12 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
goto free;
}
- changed |= ieee80211_recalc_twt_req(link, link_sta, elems);
+ if (WARN_ON(!link->conf->chandef.chan))
+ goto free;
+
+ sband = local->hw.wiphy->bands[link->conf->chandef.chan->band];
+
+ changed |= ieee80211_recalc_twt_req(sdata, sband, link, link_sta, elems);
if (ieee80211_config_bw(link, elems->ht_cap_elem,
elems->vht_cap_elem, elems->ht_operation,
@@ -6640,6 +6704,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
req->ap_mld_addr ?: req->bss->bssid,
ETH_ALEN);
auth_data->bss = req->bss;
+ auth_data->link_id = req->link_id;
if (req->auth_data_len >= 4) {
if (req->auth_type == NL80211_AUTHTYPE_SAE) {
@@ -6658,7 +6723,8 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
* removal and re-addition of the STA entry in
* ieee80211_prep_connection().
*/
- cont_auth = ifmgd->auth_data && req->bss == ifmgd->auth_data->bss;
+ cont_auth = ifmgd->auth_data && req->bss == ifmgd->auth_data->bss &&
+ ifmgd->auth_data->link_id == req->link_id;
if (req->ie && req->ie_len) {
memcpy(&auth_data->data[auth_data->data_len],
@@ -6982,7 +7048,8 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
/* keep sta info, bssid if matching */
match = ether_addr_equal(ifmgd->auth_data->ap_addr,
- assoc_data->ap_addr);
+ assoc_data->ap_addr) &&
+ ifmgd->auth_data->link_id == req->link_id;
ieee80211_destroy_auth_data(sdata, match);
}
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 3d91b98db099..762346598338 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -1963,9 +1963,6 @@ minstrel_ht_alloc(struct ieee80211_hw *hw)
/* safe default, does not necessarily have to match hw properties */
mp->max_retry = 7;
- if (hw->max_rates >= 4)
- mp->has_mrr = true;
-
mp->hw = hw;
mp->update_interval = HZ / 20;
diff --git a/net/mac80211/rc80211_minstrel_ht.h b/net/mac80211/rc80211_minstrel_ht.h
index 1766ff0c78d3..4be0401f7721 100644
--- a/net/mac80211/rc80211_minstrel_ht.h
+++ b/net/mac80211/rc80211_minstrel_ht.h
@@ -74,7 +74,6 @@
struct minstrel_priv {
struct ieee80211_hw *hw;
- bool has_mrr;
unsigned int cw_min;
unsigned int cw_max;
unsigned int max_retry;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index f99416d2e144..c28c6fbf786e 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1571,9 +1571,6 @@ static void sta_ps_start(struct sta_info *sta)
ieee80211_clear_fast_xmit(sta);
- if (!sta->sta.txq[0])
- return;
-
for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
struct ieee80211_txq *txq = sta->sta.txq[tid];
struct txq_info *txqi = to_txq_info(txq);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index cebfd148bb40..04e0f132b1d9 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -140,17 +140,15 @@ static void __cleanup_single_sta(struct sta_info *sta)
atomic_dec(&ps->num_sta_ps);
}
- if (sta->sta.txq[0]) {
- for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
- struct txq_info *txqi;
+ for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
+ struct txq_info *txqi;
- if (!sta->sta.txq[i])
- continue;
+ if (!sta->sta.txq[i])
+ continue;
- txqi = to_txq_info(sta->sta.txq[i]);
+ txqi = to_txq_info(sta->sta.txq[i]);
- ieee80211_txq_purge(local, txqi);
- }
+ ieee80211_txq_purge(local, txqi);
}
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
@@ -366,6 +364,9 @@ static void sta_remove_link(struct sta_info *sta, unsigned int link_id,
if (unhash)
link_sta_info_hash_del(sta->local, link_sta);
+ if (test_sta_flag(sta, WLAN_STA_INSERTED))
+ ieee80211_link_sta_debugfs_remove(link_sta);
+
if (link_sta != &sta->deflink)
alloc = container_of(link_sta, typeof(*alloc), info);
@@ -425,8 +426,7 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
sta_dbg(sta->sdata, "Destroyed STA %pM\n", sta->sta.addr);
- if (sta->sta.txq[0])
- kfree(to_txq_info(sta->sta.txq[0]));
+ kfree(to_txq_info(sta->sta.txq[0]));
kfree(rcu_dereference_raw(sta->sta.rates));
#ifdef CONFIG_MAC80211_MESH
kfree(sta->mesh);
@@ -511,6 +511,7 @@ static void sta_info_add_link(struct sta_info *sta,
link_info->sta = sta;
link_info->link_id = link_id;
link_info->pub = link_sta;
+ link_info->pub->sta = &sta->sta;
link_sta->link_id = link_id;
rcu_assign_pointer(sta->link[link_id], link_info);
rcu_assign_pointer(sta->sta.link[link_id], link_sta);
@@ -527,6 +528,8 @@ __sta_info_alloc(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
struct ieee80211_hw *hw = &local->hw;
struct sta_info *sta;
+ void *txq_data;
+ int size;
int i;
sta = kzalloc(sizeof(*sta) + hw->sta_data_size, gfp);
@@ -596,21 +599,18 @@ __sta_info_alloc(struct ieee80211_sub_if_data *sdata,
sta->last_connected = ktime_get_seconds();
- if (local->ops->wake_tx_queue) {
- void *txq_data;
- int size = sizeof(struct txq_info) +
- ALIGN(hw->txq_data_size, sizeof(void *));
+ size = sizeof(struct txq_info) +
+ ALIGN(hw->txq_data_size, sizeof(void *));
- txq_data = kcalloc(ARRAY_SIZE(sta->sta.txq), size, gfp);
- if (!txq_data)
- goto free;
+ txq_data = kcalloc(ARRAY_SIZE(sta->sta.txq), size, gfp);
+ if (!txq_data)
+ goto free;
- for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
- struct txq_info *txq = txq_data + i * size;
+ for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
+ struct txq_info *txq = txq_data + i * size;
- /* might not do anything for the bufferable MMPDU TXQ */
- ieee80211_txq_init(sdata, sta, txq, i);
- }
+ /* might not do anything for the (bufferable) MMPDU TXQ */
+ ieee80211_txq_init(sdata, sta, txq, i);
}
if (sta_prepare_rate_control(local, sta, gfp))
@@ -684,8 +684,7 @@ __sta_info_alloc(struct ieee80211_sub_if_data *sdata,
return sta;
free_txq:
- if (sta->sta.txq[0])
- kfree(to_txq_info(sta->sta.txq[0]));
+ kfree(to_txq_info(sta->sta.txq[0]));
free:
sta_info_free_link(&sta->deflink);
#ifdef CONFIG_MAC80211_MESH
@@ -874,6 +873,26 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
ieee80211_sta_debugfs_add(sta);
rate_control_add_sta_debugfs(sta);
+ if (sta->sta.valid_links) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sta->link); i++) {
+ struct link_sta_info *link_sta;
+
+ link_sta = rcu_dereference_protected(sta->link[i],
+ lockdep_is_held(&local->sta_mtx));
+
+ if (!link_sta)
+ continue;
+
+ ieee80211_link_sta_debugfs_add(link_sta);
+ if (sdata->vif.active_links & BIT(i))
+ ieee80211_link_sta_debugfs_drv_add(link_sta);
+ }
+ } else {
+ ieee80211_link_sta_debugfs_add(&sta->deflink);
+ ieee80211_link_sta_debugfs_drv_add(&sta->deflink);
+ }
sinfo->generation = local->sta_generation;
cfg80211_new_sta(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
@@ -1958,9 +1977,6 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
* TIM recalculation.
*/
- if (!sta->sta.txq[0])
- return;
-
for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
if (!sta->sta.txq[tid] ||
!(driver_release_tids & BIT(tid)) ||
@@ -2127,22 +2143,30 @@ void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid,
}
EXPORT_SYMBOL(ieee80211_sta_register_airtime);
-void ieee80211_sta_recalc_aggregates(struct ieee80211_sta *pubsta)
+void __ieee80211_sta_recalc_aggregates(struct sta_info *sta, u16 active_links)
{
- struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
- struct ieee80211_link_sta *link_sta;
- int link_id, i;
bool first = true;
+ int link_id;
- if (!pubsta->valid_links || !pubsta->mlo) {
- pubsta->cur = &pubsta->deflink.agg;
+ if (!sta->sta.valid_links || !sta->sta.mlo) {
+ sta->sta.cur = &sta->sta.deflink.agg;
return;
}
rcu_read_lock();
- for_each_sta_active_link(&sta->sdata->vif, pubsta, link_sta, link_id) {
+ for (link_id = 0; link_id < ARRAY_SIZE((sta)->link); link_id++) {
+ struct ieee80211_link_sta *link_sta;
+ int i;
+
+ if (!(active_links & BIT(link_id)))
+ continue;
+
+ link_sta = rcu_dereference(sta->sta.link[link_id]);
+ if (!link_sta)
+ continue;
+
if (first) {
- sta->cur = pubsta->deflink.agg;
+ sta->cur = sta->sta.deflink.agg;
first = false;
continue;
}
@@ -2161,7 +2185,14 @@ void ieee80211_sta_recalc_aggregates(struct ieee80211_sta *pubsta)
}
rcu_read_unlock();
- pubsta->cur = &sta->cur;
+ sta->sta.cur = &sta->cur;
+}
+
+void ieee80211_sta_recalc_aggregates(struct ieee80211_sta *pubsta)
+{
+ struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
+
+ __ieee80211_sta_recalc_aggregates(sta, sta->sdata->vif.active_links);
}
EXPORT_SYMBOL(ieee80211_sta_recalc_aggregates);
@@ -2396,9 +2427,9 @@ static inline u64 sta_get_tidstats_msdu(struct ieee80211_sta_rx_stats *rxstats,
u64 value;
do {
- start = u64_stats_fetch_begin_irq(&rxstats->syncp);
+ start = u64_stats_fetch_begin(&rxstats->syncp);
value = rxstats->msdu[tid];
- } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));
+ } while (u64_stats_fetch_retry(&rxstats->syncp, start));
return value;
}
@@ -2445,7 +2476,7 @@ static void sta_set_tidstats(struct sta_info *sta,
tidstats->tx_msdu_failed = sta->deflink.status_stats.msdu_failed[tid];
}
- if (local->ops->wake_tx_queue && tid < IEEE80211_NUM_TIDS) {
+ if (tid < IEEE80211_NUM_TIDS) {
spin_lock_bh(&local->fq.lock);
rcu_read_lock();
@@ -2464,9 +2495,9 @@ static inline u64 sta_get_stats_bytes(struct ieee80211_sta_rx_stats *rxstats)
u64 value;
do {
- start = u64_stats_fetch_begin_irq(&rxstats->syncp);
+ start = u64_stats_fetch_begin(&rxstats->syncp);
value = rxstats->bytes;
- } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));
+ } while (u64_stats_fetch_retry(&rxstats->syncp, start));
return value;
}
@@ -2773,9 +2804,6 @@ unsigned long ieee80211_sta_last_active(struct sta_info *sta)
static void sta_update_codel_params(struct sta_info *sta, u32 thr)
{
- if (!sta->sdata->local->ops->wake_tx_queue)
- return;
-
if (thr && thr < STA_SLOW_THRESHOLD * sta->local->num_sta) {
sta->cparams.target = MS2TIME(50);
sta->cparams.interval = MS2TIME(300);
@@ -2823,6 +2851,8 @@ int ieee80211_sta_allocate_link(struct sta_info *sta, unsigned int link_id)
sta_info_add_link(sta, link_id, &alloc->info, &alloc->sta);
+ ieee80211_link_sta_debugfs_add(&alloc->info);
+
return 0;
}
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 2517ea714dc4..69820b551668 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -513,6 +513,7 @@ struct ieee80211_fragment_cache {
* @status_stats.avg_ack_signal: average ACK signal
* @cur_max_bandwidth: maximum bandwidth to use for TX to the station,
* taken from HT/VHT capabilities or VHT operating mode notification
+ * @debugfs_dir: debug filesystem directory dentry
* @pub: public (driver visible) link STA data
* TODO Move other link params from sta_info as required for MLD operation
*/
@@ -560,6 +561,10 @@ struct link_sta_info {
enum ieee80211_sta_rx_bandwidth cur_max_bandwidth;
+#ifdef CONFIG_MAC80211_DEBUGFS
+ struct dentry *debugfs_dir;
+#endif
+
struct ieee80211_link_sta *pub;
};
@@ -922,6 +927,8 @@ void ieee80211_sta_set_max_amsdu_subframes(struct sta_info *sta,
const u8 *ext_capab,
unsigned int ext_capab_len);
+void __ieee80211_sta_recalc_aggregates(struct sta_info *sta, u16 active_links);
+
enum sta_stats_type {
STA_STATS_RATE_TYPE_INVALID = 0,
STA_STATS_RATE_TYPE_LEGACY,
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index f4b4d25eef95..b255f3b5bf01 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -1016,7 +1016,6 @@ ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev,
skb->priority = 256 + 5;
break;
}
- skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));
/*
* Set the WLAN_TDLS_TEARDOWN flag to indicate a teardown in progress.
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 874f2a4d831d..165ac0711d71 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1599,9 +1599,6 @@ int ieee80211_txq_setup_flows(struct ieee80211_local *local)
bool supp_vht = false;
enum nl80211_band band;
- if (!local->ops->wake_tx_queue)
- return 0;
-
ret = fq_init(fq, 4096);
if (ret)
return ret;
@@ -1649,9 +1646,6 @@ void ieee80211_txq_teardown_flows(struct ieee80211_local *local)
{
struct fq *fq = &local->fq;
- if (!local->ops->wake_tx_queue)
- return;
-
kfree(local->cvars);
local->cvars = NULL;
@@ -1668,8 +1662,7 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local,
struct ieee80211_vif *vif;
struct txq_info *txqi;
- if (!local->ops->wake_tx_queue ||
- sdata->vif.type == NL80211_IFTYPE_MONITOR)
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR)
return false;
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
@@ -2973,7 +2966,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
if (pre_conf_link_id != link_id &&
link_id != IEEE80211_LINK_UNSPECIFIED) {
-#ifdef CPTCFG_MAC80211_VERBOSE_DEBUG
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
net_info_ratelimited("%s: dropped frame to %pM with bad link ID request (%d vs. %d)\n",
sdata->name, hdr.addr1,
pre_conf_link_id, link_id);
@@ -4184,12 +4177,7 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
if (IS_ERR(sta))
sta = NULL;
- if (local->ops->wake_tx_queue) {
- u16 queue = __ieee80211_select_queue(sdata, sta, skb);
- skb_set_queue_mapping(skb, queue);
- skb_get_hash(skb);
- }
-
+ skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, sta, skb));
ieee80211_aggr_check(sdata, sta, skb);
sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift);
@@ -4500,11 +4488,7 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
struct tid_ampdu_tx *tid_tx;
u8 tid;
- if (local->ops->wake_tx_queue) {
- u16 queue = __ieee80211_select_queue(sdata, sta, skb);
- skb_set_queue_mapping(skb, queue);
- skb_get_hash(skb);
- }
+ skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, sta, skb));
if (unlikely(test_bit(SCAN_SW_SCANNING, &local->scanning)) &&
test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
@@ -4758,9 +4742,6 @@ void ieee80211_tx_pending(struct tasklet_struct *t)
if (!txok)
break;
}
-
- if (skb_queue_empty(&local->pending[i]))
- ieee80211_propagate_queue_wake(local, i);
}
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
@@ -5953,10 +5934,9 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
}
if (!IS_ERR(sta)) {
- u16 queue = __ieee80211_select_queue(sdata, sta, skb);
+ u16 queue = ieee80211_select_queue(sdata, sta, skb);
skb_set_queue_mapping(skb, queue);
- skb_get_hash(skb);
/*
* for MLO STA, the SA should be the AP MLD address, but
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index b512cb37aafb..6f5407038459 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -288,6 +288,52 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(ieee80211_ctstoself_duration);
+static void wake_tx_push_queue(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_txq *queue)
+{
+ int q = sdata->vif.hw_queue[queue->ac];
+ struct ieee80211_tx_control control = {
+ .sta = queue->sta,
+ };
+ struct sk_buff *skb;
+ unsigned long flags;
+ bool q_stopped;
+
+ while (1) {
+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+ q_stopped = local->queue_stop_reasons[q];
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+
+ if (q_stopped)
+ break;
+
+ skb = ieee80211_tx_dequeue(&local->hw, queue);
+ if (!skb)
+ break;
+
+ drv_tx(local, &control, skb);
+ }
+}
+
+/* wake_tx_queue handler for drivers not implementing a custom one */
+void ieee80211_handle_wake_tx_queue(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->vif);
+ struct ieee80211_txq *queue;
+
+ /* Use ieee80211_next_txq() for airtime fairness accounting */
+ ieee80211_txq_schedule_start(hw, txq->ac);
+ while ((queue = ieee80211_next_txq(hw, txq->ac))) {
+ wake_tx_push_queue(local, sdata, queue);
+ ieee80211_return_txq(hw, queue, false);
+ }
+ ieee80211_txq_schedule_end(hw, txq->ac);
+}
+EXPORT_SYMBOL(ieee80211_handle_wake_tx_queue);
+
static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
{
struct ieee80211_local *local = sdata->local;
@@ -400,39 +446,6 @@ void ieee80211_wake_txqs(struct tasklet_struct *t)
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
}
-void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue)
-{
- struct ieee80211_sub_if_data *sdata;
- int n_acs = IEEE80211_NUM_ACS;
-
- if (local->ops->wake_tx_queue)
- return;
-
- if (local->hw.queues < IEEE80211_NUM_ACS)
- n_acs = 1;
-
- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- int ac;
-
- if (!sdata->dev)
- continue;
-
- if (sdata->vif.cab_queue != IEEE80211_INVAL_HW_QUEUE &&
- local->queue_stop_reasons[sdata->vif.cab_queue] != 0)
- continue;
-
- for (ac = 0; ac < n_acs; ac++) {
- int ac_queue = sdata->vif.hw_queue[ac];
-
- if (ac_queue == queue ||
- (sdata->vif.cab_queue == queue &&
- local->queue_stop_reasons[ac_queue] == 0 &&
- skb_queue_empty(&local->pending[ac_queue])))
- netif_wake_subqueue(sdata->dev, ac);
- }
- }
-}
-
static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
enum queue_stop_reason reason,
bool refcounted,
@@ -463,11 +476,7 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
/* someone still has this queue stopped */
return;
- if (skb_queue_empty(&local->pending[queue])) {
- rcu_read_lock();
- ieee80211_propagate_queue_wake(local, queue);
- rcu_read_unlock();
- } else
+ if (!skb_queue_empty(&local->pending[queue]))
tasklet_schedule(&local->tx_pending_tasklet);
/*
@@ -477,12 +486,10 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
* release someone's lock, but it is fine because all the callers of
* __ieee80211_wake_queue call it right before releasing the lock.
*/
- if (local->ops->wake_tx_queue) {
- if (reason == IEEE80211_QUEUE_STOP_REASON_DRIVER)
- tasklet_schedule(&local->wake_txqs_tasklet);
- else
- _ieee80211_wake_txqs(local, flags);
- }
+ if (reason == IEEE80211_QUEUE_STOP_REASON_DRIVER)
+ tasklet_schedule(&local->wake_txqs_tasklet);
+ else
+ _ieee80211_wake_txqs(local, flags);
}
void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
@@ -539,10 +546,6 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
for (ac = 0; ac < n_acs; ac++) {
if (sdata->vif.hw_queue[ac] == queue ||
sdata->vif.cab_queue == queue) {
- if (!local->ops->wake_tx_queue) {
- netif_stop_subqueue(sdata->dev, ac);
- continue;
- }
spin_lock(&local->fq.lock);
sdata->vif.txqs_stopped[ac] = true;
spin_unlock(&local->fq.lock);
@@ -1026,8 +1029,10 @@ ieee80211_parse_extension_element(u32 *crc,
elems->eht_operation = data;
break;
case WLAN_EID_EXT_EHT_MULTI_LINK:
- if (ieee80211_mle_size_ok(data, len))
+ if (ieee80211_mle_size_ok(data, len)) {
elems->multi_link = (void *)data;
+ elems->multi_link_len = len;
+ }
break;
}
}
@@ -1499,6 +1504,145 @@ static size_t ieee802_11_find_bssid_profile(const u8 *start, size_t len,
return found ? profile_len : 0;
}
+static void ieee80211_defragment_element(struct ieee802_11_elems *elems,
+ void **elem_ptr, size_t *len,
+ size_t total_len, u8 frag_id)
+{
+ u8 *data = *elem_ptr, *pos, *start;
+ const struct element *elem;
+
+ /*
+ * Since 'data' points to the data of the element, not the element
+ * itself, allow 254 in case it was an extended element where the
+ * extended ID isn't part of the data we see here and thus not part of
+ * 'len' either.
+ */
+ if (!data || (*len != 254 && *len != 255))
+ return;
+
+ start = elems->scratch_pos;
+
+ if (WARN_ON(*len > (elems->scratch + elems->scratch_len -
+ elems->scratch_pos)))
+ return;
+
+ memcpy(elems->scratch_pos, data, *len);
+ elems->scratch_pos += *len;
+
+ pos = data + *len;
+ total_len -= *len;
+ for_each_element(elem, pos, total_len) {
+ if (elem->id != frag_id)
+ break;
+
+ if (WARN_ON(elem->datalen >
+ (elems->scratch + elems->scratch_len -
+ elems->scratch_pos)))
+ return;
+
+ memcpy(elems->scratch_pos, elem->data, elem->datalen);
+ elems->scratch_pos += elem->datalen;
+
+ *len += elem->datalen;
+ }
+
+ *elem_ptr = start;
+}
+
+static void ieee80211_mle_get_sta_prof(struct ieee802_11_elems *elems,
+ u8 link_id)
+{
+ const struct ieee80211_multi_link_elem *ml = elems->multi_link;
+ size_t ml_len = elems->multi_link_len;
+ const struct element *sub;
+
+ if (!ml || !ml_len)
+ return;
+
+ if (le16_get_bits(ml->control, IEEE80211_ML_CONTROL_TYPE) !=
+ IEEE80211_ML_CONTROL_TYPE_BASIC)
+ return;
+
+ for_each_mle_subelement(sub, (u8 *)ml, ml_len) {
+ struct ieee80211_mle_per_sta_profile *prof = (void *)sub->data;
+ u16 control;
+
+ if (sub->id != IEEE80211_MLE_SUBELEM_PER_STA_PROFILE)
+ continue;
+
+ if (!ieee80211_mle_sta_prof_size_ok(sub->data, sub->datalen))
+ return;
+
+ control = le16_to_cpu(prof->control);
+
+ if (link_id != u16_get_bits(control,
+ IEEE80211_MLE_STA_CONTROL_LINK_ID))
+ continue;
+
+ if (!(control & IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE))
+ return;
+
+ elems->prof = prof;
+ elems->sta_prof_len = sub->datalen;
+
+ /* the sub element can be fragmented */
+ ieee80211_defragment_element(elems, (void **)&elems->prof,
+ &elems->sta_prof_len,
+ ml_len - (sub->data - (u8 *)ml),
+ IEEE80211_MLE_SUBELEM_FRAGMENT);
+ return;
+ }
+}
+
+static void ieee80211_mle_parse_link(struct ieee802_11_elems *elems,
+ struct ieee80211_elems_parse_params *params)
+{
+ struct ieee80211_mle_per_sta_profile *prof;
+ struct ieee80211_elems_parse_params sub = {
+ .action = params->action,
+ .from_ap = params->from_ap,
+ .link_id = -1,
+ };
+ const struct element *non_inherit = NULL;
+ const u8 *end;
+
+ if (params->link_id == -1)
+ return;
+
+ ieee80211_defragment_element(elems, (void **)&elems->multi_link,
+ &elems->multi_link_len,
+ elems->total_len - ((u8 *)elems->multi_link -
+ elems->ie_start),
+ WLAN_EID_FRAGMENT);
+
+ ieee80211_mle_get_sta_prof(elems, params->link_id);
+ prof = elems->prof;
+
+ if (!prof)
+ return;
+
+ /* check if we have the 4 bytes for the fixed part in assoc response */
+ if (elems->sta_prof_len < sizeof(*prof) + prof->sta_info_len - 1 + 4) {
+ elems->prof = NULL;
+ elems->sta_prof_len = 0;
+ return;
+ }
+
+ /*
+ * Skip the capability information and the status code that are expected
+ * as part of the station profile in association response frames. Note
+ * the -1 is because the 'sta_info_len' is accounted as part of the
+ * per-STA profile, but not part of the 'u8 variable[]' portion.
+ */
+ sub.start = prof->variable + prof->sta_info_len - 1 + 4;
+ end = (const u8 *)prof + elems->sta_prof_len;
+ sub.len = end - sub.start;
+
+ non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
+ sub.start, sub.len);
+ _ieee802_11_parse_elems_full(&sub, elems, non_inherit);
+}
+
struct ieee802_11_elems *
ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params)
{
@@ -1506,7 +1650,7 @@ ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params)
const struct element *non_inherit = NULL;
u8 *nontransmitted_profile;
int nontransmitted_profile_len = 0;
- size_t scratch_len = params->len;
+ size_t scratch_len = params->scratch_len ?: 3 * params->len;
elems = kzalloc(sizeof(*elems) + scratch_len, GFP_ATOMIC);
if (!elems)
@@ -1541,6 +1685,8 @@ ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params)
_ieee802_11_parse_elems_full(&sub, elems, NULL);
}
+ ieee80211_mle_parse_link(elems, params);
+
if (elems->tim && !elems->parse_error) {
const struct ieee80211_tim_ie *tim_ie = elems->tim;
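A note on the new export above: ieee80211_handle_wake_tx_queue() is meant as a generic wake_tx_queue handler for drivers that do not implement their own iTXQ push logic; it dequeues frames via ieee80211_tx_dequeue() and hands them to drv_tx() under the airtime scheduler. A minimal sketch of how a driver might wire it up is shown below; the ops table and callback names are hypothetical, only the .wake_tx_queue assignment reflects the handler added in this patch.

    /* Sketch only: hypothetical driver using the generic handler added above */
    static const struct ieee80211_ops hypothetical_driver_ops = {
            .tx             = hypothetical_driver_tx,
            .start          = hypothetical_driver_start,
            .stop           = hypothetical_driver_stop,
            /* let mac80211 push frames from the internal TXQs into .tx */
            .wake_tx_queue  = ieee80211_handle_wake_tx_queue,
            /* ... remaining mandatory callbacks elided ... */
    };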
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index ecc1de2e68a5..a12c63638680 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -122,6 +122,9 @@ u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
u8 *p;
+ /* Ensure hash is set prior to potential SW encryption */
+ skb_get_hash(skb);
+
if ((info->control.flags & IEEE80211_TX_CTRL_DONT_REORDER) ||
local->hw.queues < IEEE80211_NUM_ACS)
return 0;
@@ -141,12 +144,15 @@ u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata,
return ieee80211_downgrade_queue(sdata, NULL, skb);
}
-u16 __ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
- struct sta_info *sta, struct sk_buff *skb)
+u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta, struct sk_buff *skb)
{
struct mac80211_qos_map *qos_map;
bool qos;
+ /* Ensure hash is set prior to potential SW encryption */
+ skb_get_hash(skb);
+
/* all mesh/ocb stations are required to support WME */
if (sta && (sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
sdata->vif.type == NL80211_IFTYPE_OCB))
@@ -176,59 +182,6 @@ u16 __ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
return ieee80211_downgrade_queue(sdata, sta, skb);
}
-
-/* Indicate which queue to use. */
-u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb)
-{
- struct ieee80211_local *local = sdata->local;
- struct sta_info *sta = NULL;
- const u8 *ra = NULL;
- u16 ret;
-
- /* when using iTXQ, we can do this later */
- if (local->ops->wake_tx_queue)
- return 0;
-
- if (local->hw.queues < IEEE80211_NUM_ACS || skb->len < 6) {
- skb->priority = 0; /* required for correct WPA/11i MIC */
- return 0;
- }
-
- rcu_read_lock();
- switch (sdata->vif.type) {
- case NL80211_IFTYPE_AP_VLAN:
- sta = rcu_dereference(sdata->u.vlan.sta);
- if (sta)
- break;
- fallthrough;
- case NL80211_IFTYPE_AP:
- ra = skb->data;
- break;
- case NL80211_IFTYPE_STATION:
- /* might be a TDLS station */
- sta = sta_info_get(sdata, skb->data);
- if (sta)
- break;
-
- ra = sdata->deflink.u.mgd.bssid;
- break;
- case NL80211_IFTYPE_ADHOC:
- ra = skb->data;
- break;
- default:
- break;
- }
-
- if (!sta && ra && !is_multicast_ether_addr(ra))
- sta = sta_info_get(sdata, ra);
-
- ret = __ieee80211_select_queue(sdata, sta, skb);
-
- rcu_read_unlock();
- return ret;
-}
-
/**
* ieee80211_set_qos_hdr - Fill in the QoS header if there is one.
*
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
index 2e3dec0b6087..81f0039527a9 100644
--- a/net/mac80211/wme.h
+++ b/net/mac80211/wme.h
@@ -13,10 +13,8 @@
u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb,
struct ieee80211_hdr *hdr);
-u16 __ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
- struct sta_info *sta, struct sk_buff *skb);
u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb);
+ struct sta_info *sta, struct sk_buff *skb);
void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb);
diff --git a/net/mac802154/cfg.c b/net/mac802154/cfg.c
index 1e4a9f74ed43..dc2d918fac68 100644
--- a/net/mac802154/cfg.c
+++ b/net/mac802154/cfg.c
@@ -46,7 +46,7 @@ static int ieee802154_suspend(struct wpan_phy *wpan_phy)
if (!local->open_count)
goto suspend;
- ieee802154_stop_queue(&local->hw);
+ ieee802154_sync_and_hold_queue(local);
synchronize_net();
/* stop hardware - this must stop RX */
@@ -67,12 +67,12 @@ static int ieee802154_resume(struct wpan_phy *wpan_phy)
goto wake_up;
/* restart hardware */
- ret = drv_start(local);
+ ret = drv_start(local, local->phy->filtering, &local->addr_filt);
if (ret)
return ret;
wake_up:
- ieee802154_wake_queue(&local->hw);
+ ieee802154_release_queue(local);
local->suspended = false;
return 0;
}
diff --git a/net/mac802154/driver-ops.h b/net/mac802154/driver-ops.h
index d23f0db98015..a7af3f0ddb3e 100644
--- a/net/mac802154/driver-ops.h
+++ b/net/mac802154/driver-ops.h
@@ -24,203 +24,290 @@ drv_xmit_sync(struct ieee802154_local *local, struct sk_buff *skb)
return local->ops->xmit_sync(&local->hw, skb);
}
-static inline int drv_start(struct ieee802154_local *local)
+static inline int drv_set_pan_id(struct ieee802154_local *local, __le16 pan_id)
{
+ struct ieee802154_hw_addr_filt filt;
int ret;
might_sleep();
- trace_802154_drv_start(local);
- local->started = true;
- smp_mb();
- ret = local->ops->start(&local->hw);
+ if (!local->ops->set_hw_addr_filt) {
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+
+ filt.pan_id = pan_id;
+
+ trace_802154_drv_set_pan_id(local, pan_id);
+ ret = local->ops->set_hw_addr_filt(&local->hw, &filt,
+ IEEE802154_AFILT_PANID_CHANGED);
trace_802154_drv_return_int(local, ret);
return ret;
}
-static inline void drv_stop(struct ieee802154_local *local)
+static inline int
+drv_set_extended_addr(struct ieee802154_local *local, __le64 extended_addr)
{
- might_sleep();
+ struct ieee802154_hw_addr_filt filt;
+ int ret;
- trace_802154_drv_stop(local);
- local->ops->stop(&local->hw);
- trace_802154_drv_return_void(local);
+ might_sleep();
- /* sync away all work on the tasklet before clearing started */
- tasklet_disable(&local->tasklet);
- tasklet_enable(&local->tasklet);
+ if (!local->ops->set_hw_addr_filt) {
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
- barrier();
+ filt.ieee_addr = extended_addr;
- local->started = false;
+ trace_802154_drv_set_extended_addr(local, extended_addr);
+ ret = local->ops->set_hw_addr_filt(&local->hw, &filt,
+ IEEE802154_AFILT_IEEEADDR_CHANGED);
+ trace_802154_drv_return_int(local, ret);
+ return ret;
}
static inline int
-drv_set_channel(struct ieee802154_local *local, u8 page, u8 channel)
+drv_set_short_addr(struct ieee802154_local *local, __le16 short_addr)
{
+ struct ieee802154_hw_addr_filt filt;
int ret;
might_sleep();
- trace_802154_drv_set_channel(local, page, channel);
- ret = local->ops->set_channel(&local->hw, page, channel);
+ if (!local->ops->set_hw_addr_filt) {
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+
+ filt.short_addr = short_addr;
+
+ trace_802154_drv_set_short_addr(local, short_addr);
+ ret = local->ops->set_hw_addr_filt(&local->hw, &filt,
+ IEEE802154_AFILT_SADDR_CHANGED);
trace_802154_drv_return_int(local, ret);
return ret;
}
-static inline int drv_set_tx_power(struct ieee802154_local *local, s32 mbm)
+static inline int
+drv_set_pan_coord(struct ieee802154_local *local, bool is_coord)
{
+ struct ieee802154_hw_addr_filt filt;
int ret;
might_sleep();
- if (!local->ops->set_txpower) {
+ if (!local->ops->set_hw_addr_filt) {
WARN_ON(1);
return -EOPNOTSUPP;
}
- trace_802154_drv_set_tx_power(local, mbm);
- ret = local->ops->set_txpower(&local->hw, mbm);
+ filt.pan_coord = is_coord;
+
+ trace_802154_drv_set_pan_coord(local, is_coord);
+ ret = local->ops->set_hw_addr_filt(&local->hw, &filt,
+ IEEE802154_AFILT_PANC_CHANGED);
trace_802154_drv_return_int(local, ret);
return ret;
}
-static inline int drv_set_cca_mode(struct ieee802154_local *local,
- const struct wpan_phy_cca *cca)
+static inline int
+drv_set_promiscuous_mode(struct ieee802154_local *local, bool on)
{
int ret;
might_sleep();
- if (!local->ops->set_cca_mode) {
+ if (!local->ops->set_promiscuous_mode) {
WARN_ON(1);
return -EOPNOTSUPP;
}
- trace_802154_drv_set_cca_mode(local, cca);
- ret = local->ops->set_cca_mode(&local->hw, cca);
+ trace_802154_drv_set_promiscuous_mode(local, on);
+ ret = local->ops->set_promiscuous_mode(&local->hw, on);
trace_802154_drv_return_int(local, ret);
return ret;
}
-static inline int drv_set_lbt_mode(struct ieee802154_local *local, bool mode)
+static inline int drv_start(struct ieee802154_local *local,
+ enum ieee802154_filtering_level level,
+ const struct ieee802154_hw_addr_filt *addr_filt)
{
int ret;
might_sleep();
- if (!local->ops->set_lbt) {
+ /* set up receive mode parameters, e.g. address mode */
+ if (local->hw.flags & IEEE802154_HW_AFILT) {
+ ret = drv_set_pan_id(local, addr_filt->pan_id);
+ if (ret < 0)
+ return ret;
+
+ ret = drv_set_short_addr(local, addr_filt->short_addr);
+ if (ret < 0)
+ return ret;
+
+ ret = drv_set_extended_addr(local, addr_filt->ieee_addr);
+ if (ret < 0)
+ return ret;
+ }
+
+ switch (level) {
+ case IEEE802154_FILTERING_NONE:
+ fallthrough;
+ case IEEE802154_FILTERING_1_FCS:
+ fallthrough;
+ case IEEE802154_FILTERING_2_PROMISCUOUS:
+ /* TODO: Requires a different receive mode setup e.g.
+ * at86rf233 hardware.
+ */
+ fallthrough;
+ case IEEE802154_FILTERING_3_SCAN:
+ if (local->hw.flags & IEEE802154_HW_PROMISCUOUS) {
+ ret = drv_set_promiscuous_mode(local, true);
+ if (ret < 0)
+ return ret;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ /* In practice other filtering levels can be requested, but since
+ * most hardware/drivers currently only support
+ * IEEE802154_FILTERING_NONE, we fall back to that actual
+ * filtering level in hardware and do our own additional
+ * filtering in the mac802154 receive path.
+ *
+ * TODO: Move this logic into the device drivers, as hardware may
+ * support higher filtering levels. Hardware may also require
+ * registers to be set in a different order, which could currently
+ * be buggy, so all receive parameters need to be moved into the
+ * start() callback, letting the driver enter the requested mode
+ * before it turns on receive handling.
+ */
+ local->phy->filtering = IEEE802154_FILTERING_NONE;
+ break;
+ case IEEE802154_FILTERING_4_FRAME_FIELDS:
+ /* Do not error out if IEEE802154_HW_PROMISCUOUS is not supported,
+ * because we expect such hardware to operate at the
+ * IEEE802154_FILTERING_4_FRAME_FIELDS level anyway.
+ */
+ if (local->hw.flags & IEEE802154_HW_PROMISCUOUS) {
+ ret = drv_set_promiscuous_mode(local, false);
+ if (ret < 0)
+ return ret;
+ }
+
+ local->phy->filtering = IEEE802154_FILTERING_4_FRAME_FIELDS;
+ break;
+ default:
WARN_ON(1);
- return -EOPNOTSUPP;
+ return -EINVAL;
}
- trace_802154_drv_set_lbt_mode(local, mode);
- ret = local->ops->set_lbt(&local->hw, mode);
+ trace_802154_drv_start(local);
+ local->started = true;
+ smp_mb();
+ ret = local->ops->start(&local->hw);
trace_802154_drv_return_int(local, ret);
return ret;
}
+static inline void drv_stop(struct ieee802154_local *local)
+{
+ might_sleep();
+
+ trace_802154_drv_stop(local);
+ local->ops->stop(&local->hw);
+ trace_802154_drv_return_void(local);
+
+ /* sync away all work on the tasklet before clearing started */
+ tasklet_disable(&local->tasklet);
+ tasklet_enable(&local->tasklet);
+
+ barrier();
+
+ local->started = false;
+}
+
static inline int
-drv_set_cca_ed_level(struct ieee802154_local *local, s32 mbm)
+drv_set_channel(struct ieee802154_local *local, u8 page, u8 channel)
{
int ret;
might_sleep();
- if (!local->ops->set_cca_ed_level) {
- WARN_ON(1);
- return -EOPNOTSUPP;
- }
-
- trace_802154_drv_set_cca_ed_level(local, mbm);
- ret = local->ops->set_cca_ed_level(&local->hw, mbm);
+ trace_802154_drv_set_channel(local, page, channel);
+ ret = local->ops->set_channel(&local->hw, page, channel);
trace_802154_drv_return_int(local, ret);
return ret;
}
-static inline int drv_set_pan_id(struct ieee802154_local *local, __le16 pan_id)
+static inline int drv_set_tx_power(struct ieee802154_local *local, s32 mbm)
{
- struct ieee802154_hw_addr_filt filt;
int ret;
might_sleep();
- if (!local->ops->set_hw_addr_filt) {
+ if (!local->ops->set_txpower) {
WARN_ON(1);
return -EOPNOTSUPP;
}
- filt.pan_id = pan_id;
-
- trace_802154_drv_set_pan_id(local, pan_id);
- ret = local->ops->set_hw_addr_filt(&local->hw, &filt,
- IEEE802154_AFILT_PANID_CHANGED);
+ trace_802154_drv_set_tx_power(local, mbm);
+ ret = local->ops->set_txpower(&local->hw, mbm);
trace_802154_drv_return_int(local, ret);
return ret;
}
-static inline int
-drv_set_extended_addr(struct ieee802154_local *local, __le64 extended_addr)
+static inline int drv_set_cca_mode(struct ieee802154_local *local,
+ const struct wpan_phy_cca *cca)
{
- struct ieee802154_hw_addr_filt filt;
int ret;
might_sleep();
- if (!local->ops->set_hw_addr_filt) {
+ if (!local->ops->set_cca_mode) {
WARN_ON(1);
return -EOPNOTSUPP;
}
- filt.ieee_addr = extended_addr;
-
- trace_802154_drv_set_extended_addr(local, extended_addr);
- ret = local->ops->set_hw_addr_filt(&local->hw, &filt,
- IEEE802154_AFILT_IEEEADDR_CHANGED);
+ trace_802154_drv_set_cca_mode(local, cca);
+ ret = local->ops->set_cca_mode(&local->hw, cca);
trace_802154_drv_return_int(local, ret);
return ret;
}
-static inline int
-drv_set_short_addr(struct ieee802154_local *local, __le16 short_addr)
+static inline int drv_set_lbt_mode(struct ieee802154_local *local, bool mode)
{
- struct ieee802154_hw_addr_filt filt;
int ret;
might_sleep();
- if (!local->ops->set_hw_addr_filt) {
+ if (!local->ops->set_lbt) {
WARN_ON(1);
return -EOPNOTSUPP;
}
- filt.short_addr = short_addr;
-
- trace_802154_drv_set_short_addr(local, short_addr);
- ret = local->ops->set_hw_addr_filt(&local->hw, &filt,
- IEEE802154_AFILT_SADDR_CHANGED);
+ trace_802154_drv_set_lbt_mode(local, mode);
+ ret = local->ops->set_lbt(&local->hw, mode);
trace_802154_drv_return_int(local, ret);
return ret;
}
static inline int
-drv_set_pan_coord(struct ieee802154_local *local, bool is_coord)
+drv_set_cca_ed_level(struct ieee802154_local *local, s32 mbm)
{
- struct ieee802154_hw_addr_filt filt;
int ret;
might_sleep();
- if (!local->ops->set_hw_addr_filt) {
+ if (!local->ops->set_cca_ed_level) {
WARN_ON(1);
return -EOPNOTSUPP;
}
- filt.pan_coord = is_coord;
-
- trace_802154_drv_set_pan_coord(local, is_coord);
- ret = local->ops->set_hw_addr_filt(&local->hw, &filt,
- IEEE802154_AFILT_PANC_CHANGED);
+ trace_802154_drv_set_cca_ed_level(local, mbm);
+ ret = local->ops->set_cca_ed_level(&local->hw, mbm);
trace_802154_drv_return_int(local, ret);
return ret;
}
@@ -264,22 +351,4 @@ drv_set_max_frame_retries(struct ieee802154_local *local, s8 max_frame_retries)
return ret;
}
-static inline int
-drv_set_promiscuous_mode(struct ieee802154_local *local, bool on)
-{
- int ret;
-
- might_sleep();
-
- if (!local->ops->set_promiscuous_mode) {
- WARN_ON(1);
- return -EOPNOTSUPP;
- }
-
- trace_802154_drv_set_promiscuous_mode(local, on);
- ret = local->ops->set_promiscuous_mode(&local->hw, on);
- trace_802154_drv_return_int(local, ret);
- return ret;
-}
-
#endif /* __MAC802154_DRIVER_OPS */
diff --git a/net/mac802154/ieee802154_i.h b/net/mac802154/ieee802154_i.h
index 1381e6a5e180..509e0172fe82 100644
--- a/net/mac802154/ieee802154_i.h
+++ b/net/mac802154/ieee802154_i.h
@@ -26,6 +26,8 @@ struct ieee802154_local {
struct ieee802154_hw hw;
const struct ieee802154_ops *ops;
+ /* hardware address filter */
+ struct ieee802154_hw_addr_filt addr_filt;
/* ieee802154 phy */
struct wpan_phy *phy;
@@ -55,7 +57,7 @@ struct ieee802154_local {
struct sk_buff_head skb_queue;
struct sk_buff *tx_skb;
- struct work_struct tx_work;
+ struct work_struct sync_tx_work;
/* A negative Linux error code or a null/positive MLME error status */
int tx_result;
};
@@ -82,6 +84,16 @@ struct ieee802154_sub_if_data {
struct ieee802154_local *local;
struct net_device *dev;
+ /* Each interface starts and nominally operates at the filtering level
+ * given by iface_default_filtering, which is set once and for all at
+ * interface creation and should not change over time. For some MAC
+ * operations however, the filtering level may change temporarily, as
+ * reflected in the required_filtering field. The actual filtering at
+ * the PHY level may be different and is shown in struct wpan_phy.
+ */
+ enum ieee802154_filtering_level iface_default_filtering;
+ enum ieee802154_filtering_level required_filtering;
+
unsigned long state;
char name[IFNAMSIZ];
@@ -123,13 +135,53 @@ ieee802154_sdata_running(struct ieee802154_sub_if_data *sdata)
extern struct ieee802154_mlme_ops mac802154_mlme_wpan;
void ieee802154_rx(struct ieee802154_local *local, struct sk_buff *skb);
-void ieee802154_xmit_worker(struct work_struct *work);
+void ieee802154_xmit_sync_worker(struct work_struct *work);
+int ieee802154_sync_and_hold_queue(struct ieee802154_local *local);
+int ieee802154_mlme_op_pre(struct ieee802154_local *local);
+int ieee802154_mlme_tx(struct ieee802154_local *local,
+ struct ieee802154_sub_if_data *sdata,
+ struct sk_buff *skb);
+void ieee802154_mlme_op_post(struct ieee802154_local *local);
+int ieee802154_mlme_tx_one(struct ieee802154_local *local,
+ struct ieee802154_sub_if_data *sdata,
+ struct sk_buff *skb);
netdev_tx_t
ieee802154_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t
ieee802154_subif_start_xmit(struct sk_buff *skb, struct net_device *dev);
enum hrtimer_restart ieee802154_xmit_ifs_timer(struct hrtimer *timer);
+/**
+ * ieee802154_hold_queue - hold ieee802154 queue
+ * @local: main mac object
+ *
+ * Hold a queue by incrementing an atomic counter and requesting the netif
+ * queues to be stopped. The queues cannot be woken up until the counter has
+ * been brought back to zero by as many ieee802154_release_queue() calls as needed.
+ */
+void ieee802154_hold_queue(struct ieee802154_local *local);
+
+/**
+ * ieee802154_release_queue - release ieee802154 queue
+ * @local: main mac object
+ *
+ * Release a queue which is held by decrementing an atomic counter and wake it
+ * up only if the counter reaches 0.
+ */
+void ieee802154_release_queue(struct ieee802154_local *local);
+
+/**
+ * ieee802154_disable_queue - disable ieee802154 queue
+ * @local: main mac object
+ *
+ * When trying to sync the Tx queue, we cannot just stop the queue
+ * (which is basically a bit being set without proper lock handling)
+ * because it would be racy. We actually need to call netif_tx_disable()
+ * instead, which is done by this helper. Restarting the queue can
+ * however still be done with a regular wake call.
+ */
+void ieee802154_disable_queue(struct ieee802154_local *local);
+
/* MIB callbacks */
void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan);
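The hold/release helpers declared above are reference counted: the netif queues are stopped on the first hold and woken again only when the last hold is released, so nested holds are safe. A minimal sketch of the intended pairing follows; the operation itself is hypothetical, only the helpers are the ones introduced in this patch.

    /* Sketch only: bracketing a hypothetical mac802154-internal operation
     * that must not race with new data-path transmissions.
     */
    static void hypothetical_quiet_operation(struct ieee802154_local *local)
    {
            ieee802154_hold_queue(local);      /* queues stop on the first hold */

            /* ... work that must not overlap with data TX ... */

            ieee802154_release_queue(local);   /* queues wake when the count hits 0 */
    }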
diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
index 500ed1b81250..d9b50884d34e 100644
--- a/net/mac802154/iface.c
+++ b/net/mac802154/iface.c
@@ -147,25 +147,12 @@ static int ieee802154_setup_hw(struct ieee802154_sub_if_data *sdata)
struct wpan_dev *wpan_dev = &sdata->wpan_dev;
int ret;
- if (local->hw.flags & IEEE802154_HW_PROMISCUOUS) {
- ret = drv_set_promiscuous_mode(local,
- wpan_dev->promiscuous_mode);
- if (ret < 0)
- return ret;
- }
+ sdata->required_filtering = sdata->iface_default_filtering;
if (local->hw.flags & IEEE802154_HW_AFILT) {
- ret = drv_set_pan_id(local, wpan_dev->pan_id);
- if (ret < 0)
- return ret;
-
- ret = drv_set_extended_addr(local, wpan_dev->extended_addr);
- if (ret < 0)
- return ret;
-
- ret = drv_set_short_addr(local, wpan_dev->short_addr);
- if (ret < 0)
- return ret;
+ local->addr_filt.pan_id = wpan_dev->pan_id;
+ local->addr_filt.ieee_addr = wpan_dev->extended_addr;
+ local->addr_filt.short_addr = wpan_dev->short_addr;
}
if (local->hw.flags & IEEE802154_HW_LBT) {
@@ -206,7 +193,8 @@ static int mac802154_slave_open(struct net_device *dev)
if (res)
goto err;
- res = drv_start(local);
+ res = drv_start(local, sdata->required_filtering,
+ &local->addr_filt);
if (res)
goto err;
}
@@ -223,15 +211,16 @@ err:
static int
ieee802154_check_mac_settings(struct ieee802154_local *local,
- struct wpan_dev *wpan_dev,
- struct wpan_dev *nwpan_dev)
+ struct ieee802154_sub_if_data *sdata,
+ struct ieee802154_sub_if_data *nsdata)
{
+ struct wpan_dev *nwpan_dev = &nsdata->wpan_dev;
+ struct wpan_dev *wpan_dev = &sdata->wpan_dev;
+
ASSERT_RTNL();
- if (local->hw.flags & IEEE802154_HW_PROMISCUOUS) {
- if (wpan_dev->promiscuous_mode != nwpan_dev->promiscuous_mode)
- return -EBUSY;
- }
+ if (sdata->iface_default_filtering != nsdata->iface_default_filtering)
+ return -EBUSY;
if (local->hw.flags & IEEE802154_HW_AFILT) {
if (wpan_dev->pan_id != nwpan_dev->pan_id ||
@@ -285,8 +274,7 @@ ieee802154_check_concurrent_iface(struct ieee802154_sub_if_data *sdata,
/* check all phy mac sublayer settings are the same.
* We have only one phy, different values makes trouble.
*/
- ret = ieee802154_check_mac_settings(local, wpan_dev,
- &nsdata->wpan_dev);
+ ret = ieee802154_check_mac_settings(local, sdata, nsdata);
if (ret < 0)
return ret;
}
@@ -586,7 +574,7 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
sdata->dev->priv_destructor = mac802154_wpan_free;
sdata->dev->netdev_ops = &mac802154_wpan_ops;
sdata->dev->ml_priv = &mac802154_mlme_wpan;
- wpan_dev->promiscuous_mode = false;
+ sdata->iface_default_filtering = IEEE802154_FILTERING_4_FRAME_FIELDS;
wpan_dev->header_ops = &ieee802154_header_ops;
mutex_init(&sdata->sec_mtx);
@@ -600,7 +588,7 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
case NL802154_IFTYPE_MONITOR:
sdata->dev->needs_free_netdev = true;
sdata->dev->netdev_ops = &mac802154_monitor_ops;
- wpan_dev->promiscuous_mode = true;
+ sdata->iface_default_filtering = IEEE802154_FILTERING_NONE;
break;
default:
BUG();
diff --git a/net/mac802154/main.c b/net/mac802154/main.c
index bd7bdb1219dd..40fab08df24b 100644
--- a/net/mac802154/main.c
+++ b/net/mac802154/main.c
@@ -95,7 +95,7 @@ ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops)
skb_queue_head_init(&local->skb_queue);
- INIT_WORK(&local->tx_work, ieee802154_xmit_worker);
+ INIT_WORK(&local->sync_tx_work, ieee802154_xmit_sync_worker);
/* init supported flags with 802.15.4 default ranges */
phy->supported.max_minbe = 8;
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
index 726b47a4611b..0724aac8f48c 100644
--- a/net/mac802154/rx.c
+++ b/net/mac802154/rx.c
@@ -34,6 +34,7 @@ ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata,
struct sk_buff *skb, const struct ieee802154_hdr *hdr)
{
struct wpan_dev *wpan_dev = &sdata->wpan_dev;
+ struct wpan_phy *wpan_phy = sdata->local->hw.phy;
__le16 span, sshort;
int rc;
@@ -42,6 +43,17 @@ ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata,
span = wpan_dev->pan_id;
sshort = wpan_dev->short_addr;
+ /* Level 3 filtering: Only beacons are accepted during scans */
+ if (sdata->required_filtering == IEEE802154_FILTERING_3_SCAN &&
+ sdata->required_filtering > wpan_phy->filtering) {
+ if (mac_cb(skb)->type != IEEE802154_FC_TYPE_BEACON) {
+ dev_dbg(&sdata->dev->dev,
+ "drop non-beacon frame (0x%x) during scan\n",
+ mac_cb(skb)->type);
+ goto fail;
+ }
+ }
+
switch (mac_cb(skb)->dest.mode) {
case IEEE802154_ADDR_NONE:
if (hdr->source.mode != IEEE802154_ADDR_NONE)
@@ -114,8 +126,10 @@ fail:
static void
ieee802154_print_addr(const char *name, const struct ieee802154_addr *addr)
{
- if (addr->mode == IEEE802154_ADDR_NONE)
+ if (addr->mode == IEEE802154_ADDR_NONE) {
pr_debug("%s not present\n", name);
+ return;
+ }
pr_debug("%s PAN ID: %04x\n", name, le16_to_cpu(addr->pan_id));
if (addr->mode == IEEE802154_ADDR_SHORT) {
@@ -209,6 +223,13 @@ __ieee802154_rx_handle_packet(struct ieee802154_local *local,
if (!ieee802154_sdata_running(sdata))
continue;
+ /* Do not deliver packets received on interfaces expecting
+ * AACK=1 if the address filters were disabled.
+ */
+ if (local->hw.phy->filtering < IEEE802154_FILTERING_4_FRAME_FIELDS &&
+ sdata->required_filtering == IEEE802154_FILTERING_4_FRAME_FIELDS)
+ continue;
+
ieee802154_subif_frame(sdata, skb, &hdr);
skb = NULL;
break;
@@ -268,10 +289,8 @@ void ieee802154_rx(struct ieee802154_local *local, struct sk_buff *skb)
ieee802154_monitors_rx(local, skb);
- /* Check if transceiver doesn't validate the checksum.
- * If not we validate the checksum here.
- */
- if (local->hw.flags & IEEE802154_HW_RX_DROP_BAD_CKSUM) {
+ /* Level 1 filtering: Check the FCS by software when relevant */
+ if (local->hw.phy->filtering == IEEE802154_FILTERING_NONE) {
crc = crc_ccitt(0, skb->data, skb->len);
if (crc) {
rcu_read_unlock();
diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
index c829e4a75325..9d8d43cf1e64 100644
--- a/net/mac802154/tx.c
+++ b/net/mac802154/tx.c
@@ -22,10 +22,10 @@
#include "ieee802154_i.h"
#include "driver-ops.h"
-void ieee802154_xmit_worker(struct work_struct *work)
+void ieee802154_xmit_sync_worker(struct work_struct *work)
{
struct ieee802154_local *local =
- container_of(work, struct ieee802154_local, tx_work);
+ container_of(work, struct ieee802154_local, sync_tx_work);
struct sk_buff *skb = local->tx_skb;
struct net_device *dev = skb->dev;
int res;
@@ -43,7 +43,9 @@ void ieee802154_xmit_worker(struct work_struct *work)
err_tx:
/* Restart the netif queue on each sub_if_data object. */
- ieee802154_wake_queue(&local->hw);
+ ieee802154_release_queue(local);
+ if (atomic_dec_and_test(&local->phy->ongoing_txs))
+ wake_up(&local->phy->sync_txq);
kfree_skb(skb);
netdev_dbg(dev, "transmission failed\n");
}
@@ -65,7 +67,7 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
consume_skb(skb);
skb = nskb;
} else {
- goto err_tx;
+ goto err_free_skb;
}
}
@@ -74,32 +76,134 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
}
/* Stop the netif queue on each sub_if_data object. */
- ieee802154_stop_queue(&local->hw);
+ ieee802154_hold_queue(local);
+ atomic_inc(&local->phy->ongoing_txs);
- /* async is priority, otherwise sync is fallback */
+ /* Drivers should preferably implement the async callback. In some rare
+ * cases they only provide a sync callback which we will use as a
+ * fallback.
+ */
if (local->ops->xmit_async) {
unsigned int len = skb->len;
ret = drv_xmit_async(local, skb);
- if (ret) {
- ieee802154_wake_queue(&local->hw);
- goto err_tx;
- }
+ if (ret)
+ goto err_wake_netif_queue;
dev->stats.tx_packets++;
dev->stats.tx_bytes += len;
} else {
local->tx_skb = skb;
- queue_work(local->workqueue, &local->tx_work);
+ queue_work(local->workqueue, &local->sync_tx_work);
}
return NETDEV_TX_OK;
-err_tx:
+err_wake_netif_queue:
+ ieee802154_release_queue(local);
+ if (atomic_dec_and_test(&local->phy->ongoing_txs))
+ wake_up(&local->phy->sync_txq);
+err_free_skb:
kfree_skb(skb);
return NETDEV_TX_OK;
}
+static int ieee802154_sync_queue(struct ieee802154_local *local)
+{
+ int ret;
+
+ ieee802154_hold_queue(local);
+ ieee802154_disable_queue(local);
+ wait_event(local->phy->sync_txq, !atomic_read(&local->phy->ongoing_txs));
+ ret = local->tx_result;
+ ieee802154_release_queue(local);
+
+ return ret;
+}
+
+int ieee802154_sync_and_hold_queue(struct ieee802154_local *local)
+{
+ int ret;
+
+ ieee802154_hold_queue(local);
+ ret = ieee802154_sync_queue(local);
+ set_bit(WPAN_PHY_FLAG_STATE_QUEUE_STOPPED, &local->phy->flags);
+
+ return ret;
+}
+
+int ieee802154_mlme_op_pre(struct ieee802154_local *local)
+{
+ return ieee802154_sync_and_hold_queue(local);
+}
+
+int ieee802154_mlme_tx(struct ieee802154_local *local,
+ struct ieee802154_sub_if_data *sdata,
+ struct sk_buff *skb)
+{
+ int ret;
+
+ /* Avoid possible calls to ->ndo_stop() when we asynchronously perform
+ * MLME transmissions.
+ */
+ rtnl_lock();
+
+ /* Ensure the device was not stopped, otherwise error out */
+ if (!local->open_count) {
+ rtnl_unlock();
+ return -ENETDOWN;
+ }
+
+ /* Warn if the ieee802154 core thinks MLME frames can be sent while the
+ * net interface expects this cannot happen.
+ */
+ if (WARN_ON_ONCE(!netif_running(sdata->dev))) {
+ rtnl_unlock();
+ return -ENETDOWN;
+ }
+
+ ieee802154_tx(local, skb);
+ ret = ieee802154_sync_queue(local);
+
+ rtnl_unlock();
+
+ return ret;
+}
+
+void ieee802154_mlme_op_post(struct ieee802154_local *local)
+{
+ ieee802154_release_queue(local);
+}
+
+int ieee802154_mlme_tx_one(struct ieee802154_local *local,
+ struct ieee802154_sub_if_data *sdata,
+ struct sk_buff *skb)
+{
+ int ret;
+
+ ieee802154_mlme_op_pre(local);
+ ret = ieee802154_mlme_tx(local, sdata, skb);
+ ieee802154_mlme_op_post(local);
+
+ return ret;
+}
+
+static bool ieee802154_queue_is_stopped(struct ieee802154_local *local)
+{
+ return test_bit(WPAN_PHY_FLAG_STATE_QUEUE_STOPPED, &local->phy->flags);
+}
+
+static netdev_tx_t
+ieee802154_hot_tx(struct ieee802154_local *local, struct sk_buff *skb)
+{
+ /* Warn if the net interface tries to transmit frames while the
+ * ieee802154 core assumes the queue is stopped.
+ */
+ WARN_ON_ONCE(ieee802154_queue_is_stopped(local));
+
+ return ieee802154_tx(local, skb);
+}
+
netdev_tx_t
ieee802154_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
@@ -107,7 +211,7 @@ ieee802154_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb->skb_iif = dev->ifindex;
- return ieee802154_tx(sdata->local, skb);
+ return ieee802154_hot_tx(sdata->local, skb);
}
netdev_tx_t
@@ -129,5 +233,5 @@ ieee802154_subif_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb->skb_iif = dev->ifindex;
- return ieee802154_tx(sdata->local, skb);
+ return ieee802154_hot_tx(sdata->local, skb);
}
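ieee802154_mlme_tx_one() above already shows the canonical pre/tx/post pairing for a single MLME frame. Callers that need to send several MLME frames while keeping the data queue held can split the same helpers, roughly as in the sketch below; the sequence function is hypothetical, the helpers are the ones added in this file.

    /* Sketch only: a hypothetical MLME caller sending several frames while
     * the data queue stays held for the whole exchange.
     */
    static int hypothetical_mlme_sequence(struct ieee802154_local *local,
                                          struct ieee802154_sub_if_data *sdata,
                                          struct sk_buff **skbs, int n)
    {
            int ret = 0, i;

            /* sync the TX path and hold the queue for the whole sequence */
            ieee802154_mlme_op_pre(local);

            for (i = 0; i < n; i++) {
                    ret = ieee802154_mlme_tx(local, sdata, skbs[i]);
                    if (ret)
                            break;
            }

            /* release the queue once the whole MLME exchange is done */
            ieee802154_mlme_op_post(local);

            return ret;
    }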
diff --git a/net/mac802154/util.c b/net/mac802154/util.c
index 9f024d85563b..ebc9a8521765 100644
--- a/net/mac802154/util.c
+++ b/net/mac802154/util.c
@@ -13,12 +13,23 @@
/* privid for wpan_phys to determine whether they belong to us or not */
const void *const mac802154_wpan_phy_privid = &mac802154_wpan_phy_privid;
-void ieee802154_wake_queue(struct ieee802154_hw *hw)
+/**
+ * ieee802154_wake_queue - wake ieee802154 queue
+ * @hw: main hardware object
+ *
+ * Transceivers usually have either one transmit framebuffer or one framebuffer
+ * for both transmitting and receiving. Hence, the core currently only handles
+ * one frame at a time for each phy, which means we have to stop the queue to
+ * avoid new skbs coming in during the transmission. The queue then needs to be
+ * woken up after the operation.
+ */
+static void ieee802154_wake_queue(struct ieee802154_hw *hw)
{
struct ieee802154_local *local = hw_to_local(hw);
struct ieee802154_sub_if_data *sdata;
rcu_read_lock();
+ clear_bit(WPAN_PHY_FLAG_STATE_QUEUE_STOPPED, &local->phy->flags);
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
if (!sdata->dev)
continue;
@@ -27,9 +38,18 @@ void ieee802154_wake_queue(struct ieee802154_hw *hw)
}
rcu_read_unlock();
}
-EXPORT_SYMBOL(ieee802154_wake_queue);
-void ieee802154_stop_queue(struct ieee802154_hw *hw)
+/**
+ * ieee802154_stop_queue - stop ieee802154 queue
+ * @hw: main hardware object
+ *
+ * Transceivers usually have either one transmit framebuffer or one framebuffer
+ * for both transmitting and receiving. Hence, the core currently only handles
+ * one frame at a time for each phy, which means we need to tell upper layers to
+ * stop giving us new skbs while we are busy with the transmitted one. The queue
+ * must then be stopped before transmitting.
+ */
+static void ieee802154_stop_queue(struct ieee802154_hw *hw)
{
struct ieee802154_local *local = hw_to_local(hw);
struct ieee802154_sub_if_data *sdata;
@@ -43,14 +63,47 @@ void ieee802154_stop_queue(struct ieee802154_hw *hw)
}
rcu_read_unlock();
}
-EXPORT_SYMBOL(ieee802154_stop_queue);
+
+void ieee802154_hold_queue(struct ieee802154_local *local)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&local->phy->queue_lock, flags);
+ if (!atomic_fetch_inc(&local->phy->hold_txs))
+ ieee802154_stop_queue(&local->hw);
+ spin_unlock_irqrestore(&local->phy->queue_lock, flags);
+}
+
+void ieee802154_release_queue(struct ieee802154_local *local)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&local->phy->queue_lock, flags);
+ if (atomic_dec_and_test(&local->phy->hold_txs))
+ ieee802154_wake_queue(&local->hw);
+ spin_unlock_irqrestore(&local->phy->queue_lock, flags);
+}
+
+void ieee802154_disable_queue(struct ieee802154_local *local)
+{
+ struct ieee802154_sub_if_data *sdata;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+ if (!sdata->dev)
+ continue;
+
+ netif_tx_disable(sdata->dev);
+ }
+ rcu_read_unlock();
+}
enum hrtimer_restart ieee802154_xmit_ifs_timer(struct hrtimer *timer)
{
struct ieee802154_local *local =
container_of(timer, struct ieee802154_local, ifs_timer);
- ieee802154_wake_queue(&local->hw);
+ ieee802154_release_queue(local);
return HRTIMER_NORESTART;
}
@@ -84,10 +137,12 @@ void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
hw->phy->sifs_period * NSEC_PER_USEC,
HRTIMER_MODE_REL);
} else {
- ieee802154_wake_queue(hw);
+ ieee802154_release_queue(local);
}
dev_consume_skb_any(skb);
+ if (atomic_dec_and_test(&hw->phy->ongoing_txs))
+ wake_up(&hw->phy->sync_txq);
}
EXPORT_SYMBOL(ieee802154_xmit_complete);
@@ -97,8 +152,10 @@ void ieee802154_xmit_error(struct ieee802154_hw *hw, struct sk_buff *skb,
struct ieee802154_local *local = hw_to_local(hw);
local->tx_result = reason;
- ieee802154_wake_queue(hw);
+ ieee802154_release_queue(local);
dev_kfree_skb_any(skb);
+ if (atomic_dec_and_test(&hw->phy->ongoing_txs))
+ wake_up(&hw->phy->sync_txq);
}
EXPORT_SYMBOL(ieee802154_xmit_error);
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index b52afe316dc4..35b5f806fdda 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1079,9 +1079,9 @@ static void mpls_get_stats(struct mpls_dev *mdev,
p = per_cpu_ptr(mdev->stats, i);
do {
- start = u64_stats_fetch_begin_irq(&p->syncp);
+ start = u64_stats_fetch_begin(&p->syncp);
local = p->stats;
- } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+ } while (u64_stats_fetch_retry(&p->syncp, start));
stats->rx_packets += local.rx_packets;
stats->rx_bytes += local.rx_bytes;
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index b6dc6e260334..109eea2c65ff 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -2730,6 +2730,8 @@ static int mptcp_init_sock(struct sock *sk)
if (ret)
return ret;
+ set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags);
+
/* fetch the ca name; do it outside __mptcp_init_sock(), so that clone will
* propagate the correct value
*/
@@ -3707,6 +3709,8 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
struct mptcp_subflow_context *subflow;
struct sock *newsk = newsock->sk;
+ set_bit(SOCK_CUSTOM_SOCKOPT, &newsock->flags);
+
lock_sock(newsk);
/* PM/worker can now acquire the first subflow socket
@@ -3920,12 +3924,6 @@ static const struct proto_ops mptcp_v6_stream_ops = {
static struct proto mptcp_v6_prot;
-static void mptcp_v6_destroy(struct sock *sk)
-{
- mptcp_destroy(sk);
- inet6_destroy_sock(sk);
-}
-
static struct inet_protosw mptcp_v6_protosw = {
.type = SOCK_STREAM,
.protocol = IPPROTO_MPTCP,
@@ -3941,7 +3939,6 @@ int __init mptcp_proto_v6_init(void)
mptcp_v6_prot = mptcp_prot;
strcpy(mptcp_v6_prot.name, "MPTCPv6");
mptcp_v6_prot.slab = NULL;
- mptcp_v6_prot.destroy = mptcp_v6_destroy;
mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);
err = proto_register(&mptcp_v6_prot, 1);
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index c7cb68c725b2..f85e9bbfe86f 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -560,6 +560,7 @@ static bool mptcp_supported_sockopt(int level, int optname)
case TCP_TX_DELAY:
case TCP_INQ:
case TCP_FASTOPEN_CONNECT:
+ case TCP_FASTOPEN_NO_COOKIE:
return true;
}
@@ -568,8 +569,8 @@ static bool mptcp_supported_sockopt(int level, int optname)
/* TCP_REPAIR, TCP_REPAIR_QUEUE, TCP_QUEUE_SEQ, TCP_REPAIR_OPTIONS,
* TCP_REPAIR_WINDOW are not supported, better avoid this mess
*/
- /* TCP_FASTOPEN_KEY, TCP_FASTOPEN, TCP_FASTOPEN_NO_COOKIE,
- * are not supported fastopen is currently unsupported
+ /* TCP_FASTOPEN_KEY, TCP_FASTOPEN are not supported because
+ * fastopen for the listener side is currently unsupported
*/
}
return false;
@@ -757,29 +758,17 @@ static int mptcp_setsockopt_v4(struct mptcp_sock *msk, int optname,
return -EOPNOTSUPP;
}
-static int mptcp_setsockopt_sol_tcp_defer(struct mptcp_sock *msk, sockptr_t optval,
- unsigned int optlen)
-{
- struct socket *listener;
-
- listener = __mptcp_nmpc_socket(msk);
- if (!listener)
- return 0; /* TCP_DEFER_ACCEPT does not fail */
-
- return tcp_setsockopt(listener->sk, SOL_TCP, TCP_DEFER_ACCEPT, optval, optlen);
-}
-
-static int mptcp_setsockopt_sol_tcp_fastopen_connect(struct mptcp_sock *msk, sockptr_t optval,
- unsigned int optlen)
+static int mptcp_setsockopt_first_sf_only(struct mptcp_sock *msk, int level, int optname,
+ sockptr_t optval, unsigned int optlen)
{
struct socket *sock;
- /* Limit to first subflow */
+ /* Limit to first subflow, before the connection establishment */
sock = __mptcp_nmpc_socket(msk);
if (!sock)
return -EINVAL;
- return tcp_setsockopt(sock->sk, SOL_TCP, TCP_FASTOPEN_CONNECT, optval, optlen);
+ return tcp_setsockopt(sock->sk, level, optname, optval, optlen);
}
static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
@@ -809,9 +798,13 @@ static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
case TCP_NODELAY:
return mptcp_setsockopt_sol_tcp_nodelay(msk, optval, optlen);
case TCP_DEFER_ACCEPT:
- return mptcp_setsockopt_sol_tcp_defer(msk, optval, optlen);
+ /* See tcp.c: TCP_DEFER_ACCEPT does not fail */
+ mptcp_setsockopt_first_sf_only(msk, SOL_TCP, optname, optval, optlen);
+ return 0;
case TCP_FASTOPEN_CONNECT:
- return mptcp_setsockopt_sol_tcp_fastopen_connect(msk, optval, optlen);
+ case TCP_FASTOPEN_NO_COOKIE:
+ return mptcp_setsockopt_first_sf_only(msk, SOL_TCP, optname,
+ optval, optlen);
}
return -EOPNOTSUPP;
@@ -1174,6 +1167,7 @@ static int mptcp_getsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
case TCP_CC_INFO:
case TCP_DEFER_ACCEPT:
case TCP_FASTOPEN_CONNECT:
+ case TCP_FASTOPEN_NO_COOKIE:
return mptcp_getsockopt_first_sf_only(msk, SOL_TCP, optname,
optval, optlen);
case TCP_INQ:
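
With TCP_FASTOPEN_NO_COOKIE now relayed to the first subflow, an MPTCP client can request cookie-less TFO the same way it would on plain TCP. A hedged user-space sketch follows; it assumes a libc that already defines TCP_FASTOPEN_CONNECT and TCP_FASTOPEN_NO_COOKIE, and the IPPROTO_MPTCP fallback value is an assumption. Error handling is trimmed.

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262   /* assumption: value used by current kernels */
#endif

int main(void)
{
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Both options are forwarded to the first subflow before connect(). */
	if (setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT, &one, sizeof(one)) < 0)
		perror("TCP_FASTOPEN_CONNECT");
	if (setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_NO_COOKIE, &one, sizeof(one)) < 0)
		perror("TCP_FASTOPEN_NO_COOKIE");

	/* connect() and data transfer would follow here. */
	close(fd);
	return 0;
}
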
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 02a54d59697b..437a283ba6ea 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -1602,7 +1602,9 @@ int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
/* kernel sockets do not by default acquire net ref, but TCP timer
* needs it.
+ * Update ns_tracker to current stack trace and refcounted tracker.
*/
+ __netns_tracker_free(net, &sf->sk->ns_tracker, false);
sf->sk->sk_net_refcnt = 1;
get_net_track(net, &sf->sk->ns_tracker, GFP_KERNEL);
sock_inuse_add(net, 1);
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 4b8d04640ff3..0846bd75b1da 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -568,12 +568,6 @@ config NFT_TUNNEL
This option adds the "tunnel" expression that you can use to set
tunneling policies.
-config NFT_OBJREF
- tristate "Netfilter nf_tables stateful object reference module"
- help
- This option adds the "objref" expression that allows you to refer to
- stateful objects, such as counters and quotas.
-
config NFT_QUEUE
depends on NETFILTER_NETLINK_QUEUE
tristate "Netfilter nf_tables queue module"
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 0f060d100880..1d4db1943936 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -86,7 +86,8 @@ nf_tables-objs := nf_tables_core.o nf_tables_api.o nft_chain_filter.o \
nf_tables_trace.o nft_immediate.o nft_cmp.o nft_range.o \
nft_bitwise.o nft_byteorder.o nft_payload.o nft_lookup.o \
nft_dynset.o nft_meta.o nft_rt.o nft_exthdr.o nft_last.o \
- nft_counter.o nft_chain_route.o nf_tables_offload.o \
+ nft_counter.o nft_objref.o nft_inner.o \
+ nft_chain_route.o nf_tables_offload.o \
nft_set_hash.o nft_set_bitmap.o nft_set_rbtree.o \
nft_set_pipapo.o
@@ -104,7 +105,6 @@ obj-$(CONFIG_NFT_CT) += nft_ct.o
obj-$(CONFIG_NFT_FLOW_OFFLOAD) += nft_flow_offload.o
obj-$(CONFIG_NFT_LIMIT) += nft_limit.o
obj-$(CONFIG_NFT_NAT) += nft_nat.o
-obj-$(CONFIG_NFT_OBJREF) += nft_objref.o
obj-$(CONFIG_NFT_QUEUE) += nft_queue.o
obj-$(CONFIG_NFT_QUOTA) += nft_quota.o
obj-$(CONFIG_NFT_REJECT) += nft_reject.o
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 988222fff9f0..4d62059a6021 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2296,13 +2296,13 @@ static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
u64 conns, inpkts, outpkts, inbytes, outbytes;
do {
- start = u64_stats_fetch_begin_irq(&u->syncp);
+ start = u64_stats_fetch_begin(&u->syncp);
conns = u->cnt.conns;
inpkts = u->cnt.inpkts;
outpkts = u->cnt.outpkts;
inbytes = u->cnt.inbytes;
outbytes = u->cnt.outbytes;
- } while (u64_stats_fetch_retry_irq(&u->syncp, start));
+ } while (u64_stats_fetch_retry(&u->syncp, start));
seq_printf(seq, "%3X %8LX %8LX %8LX %16LX %16LX\n",
i, (u64)conns, (u64)inpkts,
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index ff737a76052e..48ea6d0264b5 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -26,7 +26,9 @@
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_log.h>
+#include <net/ip.h>
static DEFINE_MUTEX(nf_ct_helper_mutex);
struct hlist_head *nf_ct_helper_hash __read_mostly;
@@ -240,6 +242,104 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
}
EXPORT_SYMBOL_GPL(__nf_ct_try_assign_helper);
+/* 'skb' should already be pulled to nh_ofs. */
+int nf_ct_helper(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo, u16 proto)
+{
+ const struct nf_conntrack_helper *helper;
+ const struct nf_conn_help *help;
+ unsigned int protoff;
+ int err;
+
+ if (ctinfo == IP_CT_RELATED_REPLY)
+ return NF_ACCEPT;
+
+ help = nfct_help(ct);
+ if (!help)
+ return NF_ACCEPT;
+
+ helper = rcu_dereference(help->helper);
+ if (!helper)
+ return NF_ACCEPT;
+
+ if (helper->tuple.src.l3num != NFPROTO_UNSPEC &&
+ helper->tuple.src.l3num != proto)
+ return NF_ACCEPT;
+
+ switch (proto) {
+ case NFPROTO_IPV4:
+ protoff = ip_hdrlen(skb);
+ proto = ip_hdr(skb)->protocol;
+ break;
+ case NFPROTO_IPV6: {
+ u8 nexthdr = ipv6_hdr(skb)->nexthdr;
+ __be16 frag_off;
+ int ofs;
+
+ ofs = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
+ &frag_off);
+ if (ofs < 0 || (frag_off & htons(~0x7)) != 0) {
+ pr_debug("proto header not found\n");
+ return NF_ACCEPT;
+ }
+ protoff = ofs;
+ proto = nexthdr;
+ break;
+ }
+ default:
+ WARN_ONCE(1, "helper invoked on non-IP family!");
+ return NF_DROP;
+ }
+
+ if (helper->tuple.dst.protonum != proto)
+ return NF_ACCEPT;
+
+ err = helper->help(skb, protoff, ct, ctinfo);
+ if (err != NF_ACCEPT)
+ return err;
+
+ /* Adjust seqs after helper. This is needed due to some helpers (e.g.,
+ * FTP with NAT) adjusting the TCP payload size when mangling IP
+ * addresses and/or port numbers in the text-based control connection.
+ */
+ if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
+ !nf_ct_seq_adjust(skb, ct, ctinfo, protoff))
+ return NF_DROP;
+ return NF_ACCEPT;
+}
+EXPORT_SYMBOL_GPL(nf_ct_helper);
+
+int nf_ct_add_helper(struct nf_conn *ct, const char *name, u8 family,
+ u8 proto, bool nat, struct nf_conntrack_helper **hp)
+{
+ struct nf_conntrack_helper *helper;
+ struct nf_conn_help *help;
+ int ret = 0;
+
+ helper = nf_conntrack_helper_try_module_get(name, family, proto);
+ if (!helper)
+ return -EINVAL;
+
+ help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
+ if (!help) {
+ nf_conntrack_helper_put(helper);
+ return -ENOMEM;
+ }
+#if IS_ENABLED(CONFIG_NF_NAT)
+ if (nat) {
+ ret = nf_nat_helper_try_module_get(name, family, proto);
+ if (ret) {
+ nf_conntrack_helper_put(helper);
+ return ret;
+ }
+ }
+#endif
+ rcu_assign_pointer(help->helper, helper);
+ *hp = helper;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nf_ct_add_helper);
+
/* appropriate ct lock protecting must be taken by caller */
static int unhelp(struct nf_conn *ct, void *me)
{
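
nf_ct_add_helper() packages the lookup, extension-add and NAT-module steps that callers such as openvswitch previously open-coded (see the removed ovs_ct_add_helper() further down). Below is a rough sketch of how a hypothetical caller could use the signature introduced above when setting up a template conntrack entry; it is not a complete module, and example_attach_ftp_helper() is a made-up name.

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_helper.h>

/* Hypothetical caller: attach the FTP helper (and its NAT part) to a
 * template conntrack entry for IPv4/TCP traffic.  Sketch only.
 */
static int example_attach_ftp_helper(struct nf_conn *tmpl_ct, bool want_nat)
{
	struct nf_conntrack_helper *helper;
	int err;

	err = nf_ct_add_helper(tmpl_ct, "ftp", NFPROTO_IPV4, IPPROTO_TCP,
			       want_nat, &helper);
	if (err)
		pr_warn("could not attach ftp helper: %d\n", err);

	return err;
}
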
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index e7152d599d73..38e1256b6d18 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1534,10 +1534,10 @@ static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats)
for_each_possible_cpu(cpu) {
cpu_stats = per_cpu_ptr(stats, cpu);
do {
- seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+ seq = u64_stats_fetch_begin(&cpu_stats->syncp);
pkts = cpu_stats->pkts;
bytes = cpu_stats->bytes;
- } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq));
+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, seq));
total.pkts += pkts;
total.bytes += bytes;
}
@@ -2857,6 +2857,43 @@ err1:
return err;
}
+int nft_expr_inner_parse(const struct nft_ctx *ctx, const struct nlattr *nla,
+ struct nft_expr_info *info)
+{
+ struct nlattr *tb[NFTA_EXPR_MAX + 1];
+ const struct nft_expr_type *type;
+ int err;
+
+ err = nla_parse_nested_deprecated(tb, NFTA_EXPR_MAX, nla,
+ nft_expr_policy, NULL);
+ if (err < 0)
+ return err;
+
+ if (!tb[NFTA_EXPR_DATA])
+ return -EINVAL;
+
+ type = __nft_expr_type_get(ctx->family, tb[NFTA_EXPR_NAME]);
+ if (IS_ERR(type))
+ return PTR_ERR(type);
+
+ if (!type->inner_ops)
+ return -EOPNOTSUPP;
+
+ err = nla_parse_nested_deprecated(info->tb, type->maxattr,
+ tb[NFTA_EXPR_DATA],
+ type->policy, NULL);
+ if (err < 0)
+ goto err_nla_parse;
+
+ info->attr = nla;
+ info->ops = type->inner_ops;
+
+ return 0;
+
+err_nla_parse:
+ return err;
+}
+
static int nf_tables_newexpr(const struct nft_ctx *ctx,
const struct nft_expr_info *expr_info,
struct nft_expr *expr)
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index cee3e4e905ec..709a736c301c 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -340,6 +340,8 @@ static struct nft_expr_type *nft_basic_types[] = {
&nft_exthdr_type,
&nft_last_type,
&nft_counter_type,
+ &nft_objref_type,
+ &nft_inner_type,
};
static struct nft_object_type *nft_basic_objects[] = {
diff --git a/net/netfilter/nft_inner.c b/net/netfilter/nft_inner.c
new file mode 100644
index 000000000000..eae7caeff316
--- /dev/null
+++ b/net/netfilter/nft_inner.c
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Pablo Neira Ayuso <pablo@netfilter.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/if_vlan.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nft_meta.h>
+#include <net/netfilter/nf_tables_offload.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <net/gre.h>
+#include <net/geneve.h>
+#include <net/ip.h>
+#include <linux/icmpv6.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+
+static DEFINE_PER_CPU(struct nft_inner_tun_ctx, nft_pcpu_tun_ctx);
+
+/* Same layout as nft_expr but it embeds the private expression data area. */
+struct __nft_expr {
+ const struct nft_expr_ops *ops;
+ union {
+ struct nft_payload payload;
+ struct nft_meta meta;
+ } __attribute__((aligned(__alignof__(u64))));
+};
+
+enum {
+ NFT_INNER_EXPR_PAYLOAD,
+ NFT_INNER_EXPR_META,
+};
+
+struct nft_inner {
+ u8 flags;
+ u8 hdrsize;
+ u8 type;
+ u8 expr_type;
+
+ struct __nft_expr expr;
+};
+
+static int nft_inner_parse_l2l3(const struct nft_inner *priv,
+ const struct nft_pktinfo *pkt,
+ struct nft_inner_tun_ctx *ctx, u32 off)
+{
+ __be16 llproto, outer_llproto;
+ u32 nhoff, thoff;
+
+ if (priv->flags & NFT_INNER_LL) {
+ struct vlan_ethhdr *veth, _veth;
+ struct ethhdr *eth, _eth;
+ u32 hdrsize;
+
+ eth = skb_header_pointer(pkt->skb, off, sizeof(_eth), &_eth);
+ if (!eth)
+ return -1;
+
+ switch (eth->h_proto) {
+ case htons(ETH_P_IP):
+ case htons(ETH_P_IPV6):
+ llproto = eth->h_proto;
+ hdrsize = sizeof(_eth);
+ break;
+ case htons(ETH_P_8021Q):
+ veth = skb_header_pointer(pkt->skb, off, sizeof(_veth), &_veth);
+ if (!veth)
+ return -1;
+
+ outer_llproto = veth->h_vlan_encapsulated_proto;
+ llproto = veth->h_vlan_proto;
+ hdrsize = sizeof(_veth);
+ break;
+ default:
+ return -1;
+ }
+
+ ctx->inner_lloff = off;
+ ctx->flags |= NFT_PAYLOAD_CTX_INNER_LL;
+ off += hdrsize;
+ } else {
+ struct iphdr *iph;
+ u32 _version;
+
+ iph = skb_header_pointer(pkt->skb, off, sizeof(_version), &_version);
+ if (!iph)
+ return -1;
+
+ switch (iph->version) {
+ case 4:
+ llproto = htons(ETH_P_IP);
+ break;
+ case 6:
+ llproto = htons(ETH_P_IPV6);
+ break;
+ default:
+ return -1;
+ }
+ }
+
+ ctx->llproto = llproto;
+ if (llproto == htons(ETH_P_8021Q))
+ llproto = outer_llproto;
+
+ nhoff = off;
+
+ switch (llproto) {
+ case htons(ETH_P_IP): {
+ struct iphdr *iph, _iph;
+
+ iph = skb_header_pointer(pkt->skb, nhoff, sizeof(_iph), &_iph);
+ if (!iph)
+ return -1;
+
+ if (iph->ihl < 5 || iph->version != 4)
+ return -1;
+
+ ctx->inner_nhoff = nhoff;
+ ctx->flags |= NFT_PAYLOAD_CTX_INNER_NH;
+
+ thoff = nhoff + (iph->ihl * 4);
+ if ((ntohs(iph->frag_off) & IP_OFFSET) == 0) {
+ ctx->flags |= NFT_PAYLOAD_CTX_INNER_TH;
+ ctx->inner_thoff = thoff;
+ ctx->l4proto = iph->protocol;
+ }
+ }
+ break;
+ case htons(ETH_P_IPV6): {
+ struct ipv6hdr *ip6h, _ip6h;
+ int fh_flags = IP6_FH_F_AUTH;
+ unsigned short fragoff;
+ int l4proto;
+
+ ip6h = skb_header_pointer(pkt->skb, nhoff, sizeof(_ip6h), &_ip6h);
+ if (!ip6h)
+ return -1;
+
+ if (ip6h->version != 6)
+ return -1;
+
+ ctx->inner_nhoff = nhoff;
+ ctx->flags |= NFT_PAYLOAD_CTX_INNER_NH;
+
+ thoff = nhoff;
+ l4proto = ipv6_find_hdr(pkt->skb, &thoff, -1, &fragoff, &fh_flags);
+ if (l4proto < 0 || thoff > U16_MAX)
+ return -1;
+
+ if (fragoff == 0) {
+ thoff = nhoff + sizeof(_ip6h);
+ ctx->flags |= NFT_PAYLOAD_CTX_INNER_TH;
+ ctx->inner_thoff = thoff;
+ ctx->l4proto = l4proto;
+ }
+ }
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+static int nft_inner_parse_tunhdr(const struct nft_inner *priv,
+ const struct nft_pktinfo *pkt,
+ struct nft_inner_tun_ctx *ctx, u32 *off)
+{
+ if (pkt->tprot == IPPROTO_GRE) {
+ ctx->inner_tunoff = pkt->thoff;
+ ctx->flags |= NFT_PAYLOAD_CTX_INNER_TUN;
+ return 0;
+ }
+
+ if (pkt->tprot != IPPROTO_UDP)
+ return -1;
+
+ ctx->inner_tunoff = *off;
+ ctx->flags |= NFT_PAYLOAD_CTX_INNER_TUN;
+ *off += priv->hdrsize;
+
+ switch (priv->type) {
+ case NFT_INNER_GENEVE: {
+ struct genevehdr *gnvh, _gnvh;
+
+ gnvh = skb_header_pointer(pkt->skb, pkt->inneroff,
+ sizeof(_gnvh), &_gnvh);
+ if (!gnvh)
+ return -1;
+
+ *off += gnvh->opt_len * 4;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int nft_inner_parse(const struct nft_inner *priv,
+ struct nft_pktinfo *pkt,
+ struct nft_inner_tun_ctx *tun_ctx)
+{
+ struct nft_inner_tun_ctx ctx = {};
+ u32 off = pkt->inneroff;
+
+ if (priv->flags & NFT_INNER_HDRSIZE &&
+ nft_inner_parse_tunhdr(priv, pkt, &ctx, &off) < 0)
+ return -1;
+
+ if (priv->flags & (NFT_INNER_LL | NFT_INNER_NH)) {
+ if (nft_inner_parse_l2l3(priv, pkt, &ctx, off) < 0)
+ return -1;
+ } else if (priv->flags & NFT_INNER_TH) {
+ ctx.inner_thoff = off;
+ ctx.flags |= NFT_PAYLOAD_CTX_INNER_TH;
+ }
+
+ *tun_ctx = ctx;
+ tun_ctx->type = priv->type;
+ pkt->flags |= NFT_PKTINFO_INNER_FULL;
+
+ return 0;
+}
+
+static bool nft_inner_parse_needed(const struct nft_inner *priv,
+ const struct nft_pktinfo *pkt,
+ const struct nft_inner_tun_ctx *tun_ctx)
+{
+ if (!(pkt->flags & NFT_PKTINFO_INNER_FULL))
+ return true;
+
+ if (priv->type != tun_ctx->type)
+ return true;
+
+ return false;
+}
+
+static void nft_inner_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_inner_tun_ctx *tun_ctx = this_cpu_ptr(&nft_pcpu_tun_ctx);
+ const struct nft_inner *priv = nft_expr_priv(expr);
+
+ if (nft_payload_inner_offset(pkt) < 0)
+ goto err;
+
+ if (nft_inner_parse_needed(priv, pkt, tun_ctx) &&
+ nft_inner_parse(priv, (struct nft_pktinfo *)pkt, tun_ctx) < 0)
+ goto err;
+
+ switch (priv->expr_type) {
+ case NFT_INNER_EXPR_PAYLOAD:
+ nft_payload_inner_eval((struct nft_expr *)&priv->expr, regs, pkt, tun_ctx);
+ break;
+ case NFT_INNER_EXPR_META:
+ nft_meta_inner_eval((struct nft_expr *)&priv->expr, regs, pkt, tun_ctx);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ goto err;
+ }
+ return;
+err:
+ regs->verdict.code = NFT_BREAK;
+}
+
+static const struct nla_policy nft_inner_policy[NFTA_INNER_MAX + 1] = {
+ [NFTA_INNER_NUM] = { .type = NLA_U32 },
+ [NFTA_INNER_FLAGS] = { .type = NLA_U32 },
+ [NFTA_INNER_HDRSIZE] = { .type = NLA_U32 },
+ [NFTA_INNER_TYPE] = { .type = NLA_U32 },
+ [NFTA_INNER_EXPR] = { .type = NLA_NESTED },
+};
+
+struct nft_expr_info {
+ const struct nft_expr_ops *ops;
+ const struct nlattr *attr;
+ struct nlattr *tb[NFT_EXPR_MAXATTR + 1];
+};
+
+static int nft_inner_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_inner *priv = nft_expr_priv(expr);
+ u32 flags, hdrsize, type, num;
+ struct nft_expr_info expr_info;
+ int err;
+
+ if (!tb[NFTA_INNER_FLAGS] ||
+     !tb[NFTA_INNER_NUM] ||
+     !tb[NFTA_INNER_HDRSIZE] ||
+     !tb[NFTA_INNER_TYPE] ||
+     !tb[NFTA_INNER_EXPR])
+ return -EINVAL;
+
+ flags = ntohl(nla_get_be32(tb[NFTA_INNER_FLAGS]));
+ if (flags & ~NFT_INNER_MASK)
+ return -EOPNOTSUPP;
+
+ num = ntohl(nla_get_be32(tb[NFTA_INNER_NUM]));
+ if (num != 0)
+ return -EOPNOTSUPP;
+
+ hdrsize = ntohl(nla_get_be32(tb[NFTA_INNER_HDRSIZE]));
+ type = ntohl(nla_get_be32(tb[NFTA_INNER_TYPE]));
+
+ if (type > U8_MAX)
+ return -EINVAL;
+
+ if (flags & NFT_INNER_HDRSIZE) {
+ if (hdrsize == 0 || hdrsize > 64)
+ return -EOPNOTSUPP;
+ }
+
+ priv->flags = flags;
+ priv->hdrsize = hdrsize;
+ priv->type = type;
+
+ err = nft_expr_inner_parse(ctx, tb[NFTA_INNER_EXPR], &expr_info);
+ if (err < 0)
+ return err;
+
+ priv->expr.ops = expr_info.ops;
+
+ if (!strcmp(expr_info.ops->type->name, "payload"))
+ priv->expr_type = NFT_INNER_EXPR_PAYLOAD;
+ else if (!strcmp(expr_info.ops->type->name, "meta"))
+ priv->expr_type = NFT_INNER_EXPR_META;
+ else
+ return -EINVAL;
+
+ err = expr_info.ops->init(ctx, (struct nft_expr *)&priv->expr,
+ (const struct nlattr * const*)expr_info.tb);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int nft_inner_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct nft_inner *priv = nft_expr_priv(expr);
+
+ if (nla_put_be32(skb, NFTA_INNER_NUM, htonl(0)) ||
+ nla_put_be32(skb, NFTA_INNER_TYPE, htonl(priv->type)) ||
+ nla_put_be32(skb, NFTA_INNER_FLAGS, htonl(priv->flags)) ||
+ nla_put_be32(skb, NFTA_INNER_HDRSIZE, htonl(priv->hdrsize)))
+ goto nla_put_failure;
+
+ if (nft_expr_dump(skb, NFTA_INNER_EXPR,
+ (struct nft_expr *)&priv->expr) < 0)
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static const struct nft_expr_ops nft_inner_ops = {
+ .type = &nft_inner_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_inner)),
+ .eval = nft_inner_eval,
+ .init = nft_inner_init,
+ .dump = nft_inner_dump,
+};
+
+struct nft_expr_type nft_inner_type __read_mostly = {
+ .name = "inner",
+ .ops = &nft_inner_ops,
+ .policy = nft_inner_policy,
+ .maxattr = NFTA_INNER_MAX,
+ .owner = THIS_MODULE,
+};
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 55d2d49c3425..8c39adeebb5c 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -831,9 +831,71 @@ nft_meta_select_ops(const struct nft_ctx *ctx,
return ERR_PTR(-EINVAL);
}
+static int nft_meta_inner_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_meta *priv = nft_expr_priv(expr);
+ unsigned int len;
+
+ priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
+ switch (priv->key) {
+ case NFT_META_PROTOCOL:
+ len = sizeof(u16);
+ break;
+ case NFT_META_L4PROTO:
+ len = sizeof(u32);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ priv->len = len;
+
+ return nft_parse_register_store(ctx, tb[NFTA_META_DREG], &priv->dreg,
+ NULL, NFT_DATA_VALUE, len);
+}
+
+void nft_meta_inner_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt,
+ struct nft_inner_tun_ctx *tun_ctx)
+{
+ const struct nft_meta *priv = nft_expr_priv(expr);
+ u32 *dest = &regs->data[priv->dreg];
+
+ switch (priv->key) {
+ case NFT_META_PROTOCOL:
+ nft_reg_store16(dest, (__force u16)tun_ctx->llproto);
+ break;
+ case NFT_META_L4PROTO:
+ if (!(tun_ctx->flags & NFT_PAYLOAD_CTX_INNER_TH))
+ goto err;
+
+ nft_reg_store8(dest, tun_ctx->l4proto);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ goto err;
+ }
+ return;
+
+err:
+ regs->verdict.code = NFT_BREAK;
+}
+EXPORT_SYMBOL_GPL(nft_meta_inner_eval);
+
+static const struct nft_expr_ops nft_meta_inner_ops = {
+ .type = &nft_meta_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
+ .init = nft_meta_inner_init,
+ .dump = nft_meta_get_dump,
+ /* direct call to nft_meta_inner_eval(). */
+};
+
struct nft_expr_type nft_meta_type __read_mostly = {
.name = "meta",
.select_ops = nft_meta_select_ops,
+ .inner_ops = &nft_meta_inner_ops,
.policy = nft_meta_policy,
.maxattr = NFTA_META_MAX,
.owner = THIS_MODULE,
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index 5d8d91b3904d..74e0eea4abac 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -82,7 +82,6 @@ static void nft_objref_activate(const struct nft_ctx *ctx,
obj->use++;
}
-static struct nft_expr_type nft_objref_type;
static const struct nft_expr_ops nft_objref_ops = {
.type = &nft_objref_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_object *)),
@@ -195,7 +194,6 @@ static void nft_objref_map_destroy(const struct nft_ctx *ctx,
nf_tables_destroy_set(ctx, priv->set);
}
-static struct nft_expr_type nft_objref_type;
static const struct nft_expr_ops nft_objref_map_ops = {
.type = &nft_objref_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_objref_map)),
@@ -233,28 +231,10 @@ static const struct nla_policy nft_objref_policy[NFTA_OBJREF_MAX + 1] = {
[NFTA_OBJREF_SET_ID] = { .type = NLA_U32 },
};
-static struct nft_expr_type nft_objref_type __read_mostly = {
+struct nft_expr_type nft_objref_type __read_mostly = {
.name = "objref",
.select_ops = nft_objref_select_ops,
.policy = nft_objref_policy,
.maxattr = NFTA_OBJREF_MAX,
.owner = THIS_MODULE,
};
-
-static int __init nft_objref_module_init(void)
-{
- return nft_register_expr(&nft_objref_type);
-}
-
-static void __exit nft_objref_module_exit(void)
-{
- nft_unregister_expr(&nft_objref_type);
-}
-
-module_init(nft_objref_module_init);
-module_exit(nft_objref_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
-MODULE_ALIAS_NFT_EXPR("objref");
-MODULE_DESCRIPTION("nftables stateful object reference module");
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index 4edd899aeb9b..ac046484caf3 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -19,6 +19,7 @@
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>
+#include <net/gre.h>
#include <linux/icmpv6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
@@ -100,6 +101,40 @@ static int __nft_payload_inner_offset(struct nft_pktinfo *pkt)
pkt->inneroff = thoff + __tcp_hdrlen(th);
}
break;
+ case IPPROTO_GRE: {
+ u32 offset = sizeof(struct gre_base_hdr), version;
+ struct gre_base_hdr *gre, _gre;
+
+ gre = skb_header_pointer(pkt->skb, thoff, sizeof(_gre), &_gre);
+ if (!gre)
+ return -1;
+
+ version = gre->flags & GRE_VERSION;
+ switch (version) {
+ case GRE_VERSION_0:
+ if (gre->flags & GRE_ROUTING)
+ return -1;
+
+ if (gre->flags & GRE_CSUM) {
+ offset += sizeof_field(struct gre_full_hdr, csum) +
+ sizeof_field(struct gre_full_hdr, reserved1);
+ }
+ if (gre->flags & GRE_KEY)
+ offset += sizeof_field(struct gre_full_hdr, key);
+
+ if (gre->flags & GRE_SEQ)
+ offset += sizeof_field(struct gre_full_hdr, seq);
+ break;
+ default:
+ return -1;
+ }
+
+ pkt->inneroff = thoff + offset;
+ }
+ break;
+ case IPPROTO_IPIP:
+ pkt->inneroff = thoff;
+ break;
default:
return -1;
}
@@ -109,7 +144,7 @@ static int __nft_payload_inner_offset(struct nft_pktinfo *pkt)
return 0;
}
-static int nft_payload_inner_offset(const struct nft_pktinfo *pkt)
+int nft_payload_inner_offset(const struct nft_pktinfo *pkt)
{
if (!(pkt->flags & NFT_PKTINFO_INNER) &&
__nft_payload_inner_offset((struct nft_pktinfo *)pkt) < 0)
@@ -552,6 +587,92 @@ const struct nft_expr_ops nft_payload_fast_ops = {
.offload = nft_payload_offload,
};
+void nft_payload_inner_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ const struct nft_pktinfo *pkt,
+ struct nft_inner_tun_ctx *tun_ctx)
+{
+ const struct nft_payload *priv = nft_expr_priv(expr);
+ const struct sk_buff *skb = pkt->skb;
+ u32 *dest = &regs->data[priv->dreg];
+ int offset;
+
+ if (priv->len % NFT_REG32_SIZE)
+ dest[priv->len / NFT_REG32_SIZE] = 0;
+
+ switch (priv->base) {
+ case NFT_PAYLOAD_TUN_HEADER:
+ if (!(tun_ctx->flags & NFT_PAYLOAD_CTX_INNER_TUN))
+ goto err;
+
+ offset = tun_ctx->inner_tunoff;
+ break;
+ case NFT_PAYLOAD_LL_HEADER:
+ if (!(tun_ctx->flags & NFT_PAYLOAD_CTX_INNER_LL))
+ goto err;
+
+ offset = tun_ctx->inner_lloff;
+ break;
+ case NFT_PAYLOAD_NETWORK_HEADER:
+ if (!(tun_ctx->flags & NFT_PAYLOAD_CTX_INNER_NH))
+ goto err;
+
+ offset = tun_ctx->inner_nhoff;
+ break;
+ case NFT_PAYLOAD_TRANSPORT_HEADER:
+ if (!(tun_ctx->flags & NFT_PAYLOAD_CTX_INNER_TH))
+ goto err;
+
+ offset = tun_ctx->inner_thoff;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ goto err;
+ }
+ offset += priv->offset;
+
+ if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
+ goto err;
+
+ return;
+err:
+ regs->verdict.code = NFT_BREAK;
+}
+
+static int nft_payload_inner_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_payload *priv = nft_expr_priv(expr);
+ u32 base;
+
+ base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
+ switch (base) {
+ case NFT_PAYLOAD_TUN_HEADER:
+ case NFT_PAYLOAD_LL_HEADER:
+ case NFT_PAYLOAD_NETWORK_HEADER:
+ case NFT_PAYLOAD_TRANSPORT_HEADER:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ priv->base = base;
+ priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
+ priv->len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
+
+ return nft_parse_register_store(ctx, tb[NFTA_PAYLOAD_DREG],
+ &priv->dreg, NULL, NFT_DATA_VALUE,
+ priv->len);
+}
+
+static const struct nft_expr_ops nft_payload_inner_ops = {
+ .type = &nft_payload_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_payload)),
+ .init = nft_payload_inner_init,
+ .dump = nft_payload_dump,
+ /* direct call to nft_payload_inner_eval(). */
+};
+
static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
{
*sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
@@ -665,6 +786,16 @@ static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
return 0;
}
+struct nft_payload_set {
+ enum nft_payload_bases base:8;
+ u8 offset;
+ u8 len;
+ u8 sreg;
+ u8 csum_type;
+ u8 csum_offset;
+ u8 csum_flags;
+};
+
static void nft_payload_set_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
@@ -885,6 +1016,7 @@ nft_payload_select_ops(const struct nft_ctx *ctx,
struct nft_expr_type nft_payload_type __read_mostly = {
.name = "payload",
.select_ops = nft_payload_select_ops,
+ .inner_ops = &nft_payload_inner_ops,
.policy = nft_payload_policy,
.maxattr = NFTA_PAYLOAD_MAX,
.owner = THIS_MODULE,
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a662e8a5ff84..9ebdf3262015 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -812,6 +812,17 @@ static int netlink_release(struct socket *sock)
}
sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
+
+ /* Because struct net might disappear soon, do not keep a pointer. */
+ if (!sk->sk_net_refcnt && sock_net(sk) != &init_net) {
+ __netns_tracker_free(sock_net(sk), &sk->ns_tracker, false);
+ /* Because of deferred_put_nlk_sk and use of work queue,
+ * it is possible netns will be freed before this socket.
+ */
+ sock_net_set(sk, &init_net);
+ __netns_tracker_alloc(&init_net, &sk->ns_tracker,
+ false, GFP_KERNEL);
+ }
call_rcu(&nlk->rcu, deferred_put_nlk_sk);
return 0;
}
@@ -2488,19 +2499,24 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
flags |= NLM_F_ACK_TLVS;
skb = nlmsg_new(payload + tlvlen, GFP_KERNEL);
- if (!skb) {
- NETLINK_CB(in_skb).sk->sk_err = ENOBUFS;
- sk_error_report(NETLINK_CB(in_skb).sk);
- return;
- }
+ if (!skb)
+ goto err_skb;
rep = nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
- NLMSG_ERROR, payload, flags);
+ NLMSG_ERROR, sizeof(*errmsg), flags);
+ if (!rep)
+ goto err_bad_put;
errmsg = nlmsg_data(rep);
errmsg->error = err;
- unsafe_memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg)
- ? nlh->nlmsg_len : sizeof(*nlh),
- /* Bounds checked by the skb layer. */);
+ errmsg->msg = *nlh;
+
+ if (!(flags & NLM_F_CAPPED)) {
+ if (!nlmsg_append(skb, nlmsg_len(nlh)))
+ goto err_bad_put;
+
+ memcpy(nlmsg_data(&errmsg->msg), nlmsg_data(nlh),
+        nlmsg_len(nlh));
+ }
if (tlvlen)
netlink_ack_tlv_fill(in_skb, skb, nlh, err, extack);
@@ -2508,6 +2524,14 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
nlmsg_end(skb, rep);
nlmsg_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid);
+
+ return;
+
+err_bad_put:
+ nlmsg_free(skb);
+err_skb:
+ NETLINK_CB(in_skb).sk->sk_err = ENOBUFS;
+ sk_error_report(NETLINK_CB(in_skb).sk);
}
EXPORT_SYMBOL(netlink_ack);
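
The restructured netlink_ack() above builds the error header and the echoed request in separate steps, echoing the offending payload only when the ACK is not capped. On the receive side, userspace can tell from the message flags whether the request came back in full. A hedged user-space sketch of that check, with a synthetic message for demonstration:

#include <linux/netlink.h>
#include <stdio.h>

/* Sketch: given a received NLMSG_ERROR message, report whether the kernel
 * echoed the full offending request or only its header (NLM_F_CAPPED),
 * and whether extended-ack TLVs follow (NLM_F_ACK_TLVS).
 */
static void inspect_ack(const struct nlmsghdr *nlh)
{
	const struct nlmsgerr *e;

	if (nlh->nlmsg_type != NLMSG_ERROR)
		return;

	e = NLMSG_DATA(nlh);
	printf("error=%d, capped=%s, tlvs=%s\n", e->error,
	       (nlh->nlmsg_flags & NLM_F_CAPPED) ? "yes" : "no",
	       (nlh->nlmsg_flags & NLM_F_ACK_TLVS) ? "yes" : "no");
}

int main(void)
{
	unsigned char buf[NLMSG_SPACE(sizeof(struct nlmsgerr))] = { 0 };
	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
	struct nlmsgerr *e = NLMSG_DATA(nlh);

	nlh->nlmsg_len = NLMSG_LENGTH(sizeof(*e));
	nlh->nlmsg_type = NLMSG_ERROR;
	nlh->nlmsg_flags = NLM_F_CAPPED | NLM_F_ACK_TLVS;
	e->error = -22;

	inspect_ack(nlh);
	return 0;
}
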
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 3e16527beb91..600993c80050 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -101,6 +101,17 @@ genl_op_fill_in_reject_policy(const struct genl_family *family,
op->maxattr = 1;
}
+static void
+genl_op_fill_in_reject_policy_split(const struct genl_family *family,
+ struct genl_split_ops *op)
+{
+ if (op->policy)
+ return;
+
+ op->policy = genl_policy_reject_all;
+ op->maxattr = 1;
+}
+
static const struct genl_family *genl_family_find_byid(unsigned int id)
{
return idr_find(&genl_fam_idr, id);
@@ -118,10 +129,15 @@ static const struct genl_family *genl_family_find_byname(char *name)
return NULL;
}
-static int genl_get_cmd_cnt(const struct genl_family *family)
-{
- return family->n_ops + family->n_small_ops;
-}
+struct genl_op_iter {
+ const struct genl_family *family;
+ struct genl_split_ops doit;
+ struct genl_split_ops dumpit;
+ int cmd_idx;
+ int entry_idx;
+ u32 cmd;
+ u8 flags;
+};
static void genl_op_from_full(const struct genl_family *family,
unsigned int i, struct genl_ops *op)
@@ -181,24 +197,187 @@ static int genl_get_cmd_small(u32 cmd, const struct genl_family *family,
return -ENOENT;
}
-static int genl_get_cmd(u32 cmd, const struct genl_family *family,
- struct genl_ops *op)
+static void genl_op_from_split(struct genl_op_iter *iter)
{
- if (!genl_get_cmd_full(cmd, family, op))
- return 0;
- return genl_get_cmd_small(cmd, family, op);
+ const struct genl_family *family = iter->family;
+ int i, cnt = 0;
+
+ i = iter->entry_idx - family->n_ops - family->n_small_ops;
+
+ if (family->split_ops[i + cnt].flags & GENL_CMD_CAP_DO) {
+ iter->doit = family->split_ops[i + cnt];
+ genl_op_fill_in_reject_policy_split(family, &iter->doit);
+ cnt++;
+ } else {
+ memset(&iter->doit, 0, sizeof(iter->doit));
+ }
+
+ if (i + cnt < family->n_split_ops &&
+ family->split_ops[i + cnt].flags & GENL_CMD_CAP_DUMP) {
+ iter->dumpit = family->split_ops[i + cnt];
+ genl_op_fill_in_reject_policy_split(family, &iter->dumpit);
+ cnt++;
+ } else {
+ memset(&iter->dumpit, 0, sizeof(iter->dumpit));
+ }
+
+ WARN_ON(!cnt);
+ iter->entry_idx += cnt;
}
-static void genl_get_cmd_by_index(unsigned int i,
- const struct genl_family *family,
- struct genl_ops *op)
+static int
+genl_get_cmd_split(u32 cmd, u8 flag, const struct genl_family *family,
+ struct genl_split_ops *op)
{
- if (i < family->n_ops)
- genl_op_from_full(family, i, op);
- else if (i < family->n_ops + family->n_small_ops)
- genl_op_from_small(family, i - family->n_ops, op);
- else
- WARN_ON_ONCE(1);
+ int i;
+
+ for (i = 0; i < family->n_split_ops; i++)
+ if (family->split_ops[i].cmd == cmd &&
+ family->split_ops[i].flags & flag) {
+ *op = family->split_ops[i];
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int
+genl_cmd_full_to_split(struct genl_split_ops *op,
+ const struct genl_family *family,
+ const struct genl_ops *full, u8 flags)
+{
+ if ((flags & GENL_CMD_CAP_DO && !full->doit) ||
+ (flags & GENL_CMD_CAP_DUMP && !full->dumpit)) {
+ memset(op, 0, sizeof(*op));
+ return -ENOENT;
+ }
+
+ if (flags & GENL_CMD_CAP_DUMP) {
+ op->start = full->start;
+ op->dumpit = full->dumpit;
+ op->done = full->done;
+ } else {
+ op->pre_doit = family->pre_doit;
+ op->doit = full->doit;
+ op->post_doit = family->post_doit;
+ }
+
+ if (flags & GENL_CMD_CAP_DUMP &&
+ full->validate & GENL_DONT_VALIDATE_DUMP) {
+ op->policy = NULL;
+ op->maxattr = 0;
+ } else {
+ op->policy = full->policy;
+ op->maxattr = full->maxattr;
+ }
+
+ op->cmd = full->cmd;
+ op->internal_flags = full->internal_flags;
+ op->flags = full->flags;
+ op->validate = full->validate;
+
+ /* Make sure flags include the GENL_CMD_CAP_DO / GENL_CMD_CAP_DUMP */
+ op->flags |= flags;
+
+ return 0;
+}
+
+/* Must make sure that op is initialized to 0 on failure */
+static int
+genl_get_cmd(u32 cmd, u8 flags, const struct genl_family *family,
+ struct genl_split_ops *op)
+{
+ struct genl_ops full;
+ int err;
+
+ err = genl_get_cmd_full(cmd, family, &full);
+ if (err == -ENOENT)
+ err = genl_get_cmd_small(cmd, family, &full);
+ /* Found one of legacy forms */
+ if (err == 0)
+ return genl_cmd_full_to_split(op, family, &full, flags);
+
+ err = genl_get_cmd_split(cmd, flags, family, op);
+ if (err)
+ memset(op, 0, sizeof(*op));
+ return err;
+}
+
+/* For policy dumping only, get ops of both do and dump.
+ * Fail if both are missing; genl_get_cmd() will zero-init in case of failure.
+ */
+static int
+genl_get_cmd_both(u32 cmd, const struct genl_family *family,
+ struct genl_split_ops *doit, struct genl_split_ops *dumpit)
+{
+ int err1, err2;
+
+ err1 = genl_get_cmd(cmd, GENL_CMD_CAP_DO, family, doit);
+ err2 = genl_get_cmd(cmd, GENL_CMD_CAP_DUMP, family, dumpit);
+
+ return err1 && err2 ? -ENOENT : 0;
+}
+
+static bool
+genl_op_iter_init(const struct genl_family *family, struct genl_op_iter *iter)
+{
+ iter->family = family;
+ iter->cmd_idx = 0;
+ iter->entry_idx = 0;
+
+ iter->flags = 0;
+
+ return iter->family->n_ops +
+ iter->family->n_small_ops +
+ iter->family->n_split_ops;
+}
+
+static bool genl_op_iter_next(struct genl_op_iter *iter)
+{
+ const struct genl_family *family = iter->family;
+ bool legacy_op = true;
+ struct genl_ops op;
+
+ if (iter->entry_idx < family->n_ops) {
+ genl_op_from_full(family, iter->entry_idx, &op);
+ } else if (iter->entry_idx < family->n_ops + family->n_small_ops) {
+ genl_op_from_small(family, iter->entry_idx - family->n_ops,
+ &op);
+ } else if (iter->entry_idx <
+ family->n_ops + family->n_small_ops + family->n_split_ops) {
+ legacy_op = false;
+ /* updates entry_idx */
+ genl_op_from_split(iter);
+ } else {
+ return false;
+ }
+
+ iter->cmd_idx++;
+
+ if (legacy_op) {
+ iter->entry_idx++;
+
+ genl_cmd_full_to_split(&iter->doit, family,
+ &op, GENL_CMD_CAP_DO);
+ genl_cmd_full_to_split(&iter->dumpit, family,
+ &op, GENL_CMD_CAP_DUMP);
+ }
+
+ iter->cmd = iter->doit.cmd | iter->dumpit.cmd;
+ iter->flags = iter->doit.flags | iter->dumpit.flags;
+
+ return true;
+}
+
+static void
+genl_op_iter_copy(struct genl_op_iter *dst, struct genl_op_iter *src)
+{
+ *dst = *src;
+}
+
+static unsigned int genl_op_iter_idx(struct genl_op_iter *iter)
+{
+ return iter->cmd_idx;
}
static int genl_allocate_reserve_groups(int n_groups, int *first_id)
@@ -366,31 +545,72 @@ static void genl_unregister_mc_groups(const struct genl_family *family)
}
}
+static bool genl_split_op_check(const struct genl_split_ops *op)
+{
+ if (WARN_ON(hweight8(op->flags & (GENL_CMD_CAP_DO |
+ GENL_CMD_CAP_DUMP)) != 1))
+ return true;
+ return false;
+}
+
static int genl_validate_ops(const struct genl_family *family)
{
- int i, j;
+ struct genl_op_iter i, j;
+ unsigned int s;
if (WARN_ON(family->n_ops && !family->ops) ||
- WARN_ON(family->n_small_ops && !family->small_ops))
+ WARN_ON(family->n_small_ops && !family->small_ops) ||
+ WARN_ON(family->n_split_ops && !family->split_ops))
return -EINVAL;
- for (i = 0; i < genl_get_cmd_cnt(family); i++) {
- struct genl_ops op;
-
- genl_get_cmd_by_index(i, family, &op);
- if (op.dumpit == NULL && op.doit == NULL)
+ for (genl_op_iter_init(family, &i); genl_op_iter_next(&i); ) {
+ if (!(i.flags & (GENL_CMD_CAP_DO | GENL_CMD_CAP_DUMP)))
return -EINVAL;
- if (WARN_ON(op.cmd >= family->resv_start_op && op.validate))
+
+ if (WARN_ON(i.cmd >= family->resv_start_op &&
+ (i.doit.validate || i.dumpit.validate)))
return -EINVAL;
- for (j = i + 1; j < genl_get_cmd_cnt(family); j++) {
- struct genl_ops op2;
- genl_get_cmd_by_index(j, family, &op2);
- if (op.cmd == op2.cmd)
+ genl_op_iter_copy(&j, &i);
+ while (genl_op_iter_next(&j)) {
+ if (i.cmd == j.cmd)
return -EINVAL;
}
}
+ if (family->n_split_ops) {
+ if (genl_split_op_check(&family->split_ops[0]))
+ return -EINVAL;
+ }
+
+ for (s = 1; s < family->n_split_ops; s++) {
+ const struct genl_split_ops *a, *b;
+
+ a = &family->split_ops[s - 1];
+ b = &family->split_ops[s];
+
+ if (genl_split_op_check(b))
+ return -EINVAL;
+
+ /* Check sort order */
+ if (a->cmd < b->cmd)
+ continue;
+
+ if (a->internal_flags != b->internal_flags ||
+ ((a->flags ^ b->flags) & ~(GENL_CMD_CAP_DO |
+ GENL_CMD_CAP_DUMP))) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if ((a->flags & GENL_CMD_CAP_DO) &&
+ (b->flags & GENL_CMD_CAP_DUMP))
+ continue;
+
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
return 0;
}
@@ -544,7 +764,7 @@ static struct nlattr **
genl_family_rcv_msg_attrs_parse(const struct genl_family *family,
struct nlmsghdr *nlh,
struct netlink_ext_ack *extack,
- const struct genl_ops *ops,
+ const struct genl_split_ops *ops,
int hdrlen,
enum genl_validate_flags no_strict_flag)
{
@@ -580,22 +800,21 @@ struct genl_start_context {
const struct genl_family *family;
struct nlmsghdr *nlh;
struct netlink_ext_ack *extack;
- const struct genl_ops *ops;
+ const struct genl_split_ops *ops;
int hdrlen;
};
static int genl_start(struct netlink_callback *cb)
{
struct genl_start_context *ctx = cb->data;
- const struct genl_ops *ops = ctx->ops;
+ const struct genl_split_ops *ops;
struct genl_dumpit_info *info;
struct nlattr **attrs = NULL;
int rc = 0;
- if (ops->validate & GENL_DONT_VALIDATE_DUMP)
- goto no_attrs;
-
- if (ctx->nlh->nlmsg_len < nlmsg_msg_size(ctx->hdrlen))
+ ops = ctx->ops;
+ if (!(ops->validate & GENL_DONT_VALIDATE_DUMP) &&
+ ctx->nlh->nlmsg_len < nlmsg_msg_size(ctx->hdrlen))
return -EINVAL;
attrs = genl_family_rcv_msg_attrs_parse(ctx->family, ctx->nlh, ctx->extack,
@@ -604,7 +823,6 @@ static int genl_start(struct netlink_callback *cb)
if (IS_ERR(attrs))
return PTR_ERR(attrs);
-no_attrs:
info = genl_dumpit_info_alloc();
if (!info) {
genl_family_rcv_msg_attrs_free(attrs);
@@ -633,7 +851,7 @@ no_attrs:
static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
- const struct genl_ops *ops = &genl_dumpit_info(cb)->op;
+ const struct genl_split_ops *ops = &genl_dumpit_info(cb)->op;
int rc;
genl_lock();
@@ -645,7 +863,7 @@ static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
static int genl_lock_done(struct netlink_callback *cb)
{
const struct genl_dumpit_info *info = genl_dumpit_info(cb);
- const struct genl_ops *ops = &info->op;
+ const struct genl_split_ops *ops = &info->op;
int rc = 0;
if (ops->done) {
@@ -661,7 +879,7 @@ static int genl_lock_done(struct netlink_callback *cb)
static int genl_parallel_done(struct netlink_callback *cb)
{
const struct genl_dumpit_info *info = genl_dumpit_info(cb);
- const struct genl_ops *ops = &info->op;
+ const struct genl_split_ops *ops = &info->op;
int rc = 0;
if (ops->done)
@@ -675,15 +893,12 @@ static int genl_family_rcv_msg_dumpit(const struct genl_family *family,
struct sk_buff *skb,
struct nlmsghdr *nlh,
struct netlink_ext_ack *extack,
- const struct genl_ops *ops,
+ const struct genl_split_ops *ops,
int hdrlen, struct net *net)
{
struct genl_start_context ctx;
int err;
- if (!ops->dumpit)
- return -EOPNOTSUPP;
-
ctx.family = family;
ctx.nlh = nlh;
ctx.extack = extack;
@@ -721,16 +936,13 @@ static int genl_family_rcv_msg_doit(const struct genl_family *family,
struct sk_buff *skb,
struct nlmsghdr *nlh,
struct netlink_ext_ack *extack,
- const struct genl_ops *ops,
+ const struct genl_split_ops *ops,
int hdrlen, struct net *net)
{
struct nlattr **attrbuf;
struct genl_info info;
int err;
- if (!ops->doit)
- return -EOPNOTSUPP;
-
attrbuf = genl_family_rcv_msg_attrs_parse(family, nlh, extack,
ops, hdrlen,
GENL_DONT_VALIDATE_STRICT);
@@ -747,16 +959,16 @@ static int genl_family_rcv_msg_doit(const struct genl_family *family,
genl_info_net_set(&info, net);
memset(&info.user_ptr, 0, sizeof(info.user_ptr));
- if (family->pre_doit) {
- err = family->pre_doit(ops, skb, &info);
+ if (ops->pre_doit) {
+ err = ops->pre_doit(ops, skb, &info);
if (err)
goto out;
}
err = ops->doit(skb, &info);
- if (family->post_doit)
- family->post_doit(ops, skb, &info);
+ if (ops->post_doit)
+ ops->post_doit(ops, skb, &info);
out:
genl_family_rcv_msg_attrs_free(attrbuf);
@@ -801,8 +1013,9 @@ static int genl_family_rcv_msg(const struct genl_family *family,
{
struct net *net = sock_net(skb->sk);
struct genlmsghdr *hdr = nlmsg_data(nlh);
- struct genl_ops op;
+ struct genl_split_ops op;
int hdrlen;
+ u8 flags;
/* this family doesn't exist in this netns */
if (!family->netnsok && !net_eq(net, &init_net))
@@ -815,7 +1028,9 @@ static int genl_family_rcv_msg(const struct genl_family *family,
if (genl_header_check(family, nlh, hdr, extack))
return -EINVAL;
- if (genl_get_cmd(hdr->cmd, family, &op))
+ flags = (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP ?
+ GENL_CMD_CAP_DUMP : GENL_CMD_CAP_DO;
+ if (genl_get_cmd(hdr->cmd, flags, family, &op))
return -EOPNOTSUPP;
if ((op.flags & GENL_ADMIN_PERM) &&
@@ -826,7 +1041,7 @@ static int genl_family_rcv_msg(const struct genl_family *family,
!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
return -EPERM;
- if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP)
+ if (flags & GENL_CMD_CAP_DUMP)
return genl_family_rcv_msg_dumpit(family, skb, nlh, extack,
&op, hdrlen, net);
else
@@ -871,6 +1086,7 @@ static struct genl_family genl_ctrl;
static int ctrl_fill_info(const struct genl_family *family, u32 portid, u32 seq,
u32 flags, struct sk_buff *skb, u8 cmd)
{
+ struct genl_op_iter i;
void *hdr;
hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
@@ -884,33 +1100,26 @@ static int ctrl_fill_info(const struct genl_family *family, u32 portid, u32 seq,
nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
goto nla_put_failure;
- if (genl_get_cmd_cnt(family)) {
+ if (genl_op_iter_init(family, &i)) {
struct nlattr *nla_ops;
- int i;
nla_ops = nla_nest_start_noflag(skb, CTRL_ATTR_OPS);
if (nla_ops == NULL)
goto nla_put_failure;
- for (i = 0; i < genl_get_cmd_cnt(family); i++) {
+ while (genl_op_iter_next(&i)) {
struct nlattr *nest;
- struct genl_ops op;
u32 op_flags;
- genl_get_cmd_by_index(i, family, &op);
- op_flags = op.flags;
- if (op.dumpit)
- op_flags |= GENL_CMD_CAP_DUMP;
- if (op.doit)
- op_flags |= GENL_CMD_CAP_DO;
- if (op.policy)
+ op_flags = i.flags;
+ if (i.doit.policy || i.dumpit.policy)
op_flags |= GENL_CMD_CAP_HASPOL;
- nest = nla_nest_start_noflag(skb, i + 1);
+ nest = nla_nest_start_noflag(skb, genl_op_iter_idx(&i));
if (nest == NULL)
goto nla_put_failure;
- if (nla_put_u32(skb, CTRL_ATTR_OP_ID, op.cmd) ||
+ if (nla_put_u32(skb, CTRL_ATTR_OP_ID, i.cmd) ||
nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, op_flags))
goto nla_put_failure;
@@ -1163,10 +1372,10 @@ static int genl_ctrl_event(int event, const struct genl_family *family,
struct ctrl_dump_policy_ctx {
struct netlink_policy_dump_state *state;
const struct genl_family *rt;
- unsigned int opidx;
+ struct genl_op_iter *op_iter;
u32 op;
u16 fam_id;
- u8 policies:1,
+ u8 dump_map:1,
single_op:1;
};
@@ -1183,8 +1392,8 @@ static int ctrl_dumppolicy_start(struct netlink_callback *cb)
struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
struct nlattr **tb = info->attrs;
const struct genl_family *rt;
- struct genl_ops op;
- int err, i;
+ struct genl_op_iter i;
+ int err;
BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
@@ -1208,40 +1417,73 @@ static int ctrl_dumppolicy_start(struct netlink_callback *cb)
ctx->rt = rt;
if (tb[CTRL_ATTR_OP]) {
+ struct genl_split_ops doit, dump;
+
ctx->single_op = true;
ctx->op = nla_get_u32(tb[CTRL_ATTR_OP]);
- err = genl_get_cmd(ctx->op, rt, &op);
+ err = genl_get_cmd_both(ctx->op, rt, &doit, &dump);
if (err) {
NL_SET_BAD_ATTR(cb->extack, tb[CTRL_ATTR_OP]);
return err;
}
- if (!op.policy)
+ if (doit.policy) {
+ err = netlink_policy_dump_add_policy(&ctx->state,
+ doit.policy,
+ doit.maxattr);
+ if (err)
+ goto err_free_state;
+ }
+ if (dump.policy) {
+ err = netlink_policy_dump_add_policy(&ctx->state,
+ dump.policy,
+ dump.maxattr);
+ if (err)
+ goto err_free_state;
+ }
+
+ if (!ctx->state)
return -ENODATA;
- return netlink_policy_dump_add_policy(&ctx->state, op.policy,
- op.maxattr);
+ ctx->dump_map = 1;
+ return 0;
}
- for (i = 0; i < genl_get_cmd_cnt(rt); i++) {
- genl_get_cmd_by_index(i, rt, &op);
+ ctx->op_iter = kmalloc(sizeof(*ctx->op_iter), GFP_KERNEL);
+ if (!ctx->op_iter)
+ return -ENOMEM;
- if (op.policy) {
+ genl_op_iter_init(rt, ctx->op_iter);
+ ctx->dump_map = genl_op_iter_next(ctx->op_iter);
+
+ for (genl_op_iter_init(rt, &i); genl_op_iter_next(&i); ) {
+ if (i.doit.policy) {
err = netlink_policy_dump_add_policy(&ctx->state,
- op.policy,
- op.maxattr);
+ i.doit.policy,
+ i.doit.maxattr);
+ if (err)
+ goto err_free_state;
+ }
+ if (i.dumpit.policy) {
+ err = netlink_policy_dump_add_policy(&ctx->state,
+ i.dumpit.policy,
+ i.dumpit.maxattr);
if (err)
goto err_free_state;
}
}
- if (!ctx->state)
- return -ENODATA;
+ if (!ctx->state) {
+ err = -ENODATA;
+ goto err_free_op_iter;
+ }
return 0;
err_free_state:
netlink_policy_dump_free(ctx->state);
+err_free_op_iter:
+ kfree(ctx->op_iter);
return err;
}
@@ -1265,7 +1507,8 @@ static void *ctrl_dumppolicy_prep(struct sk_buff *skb,
static int ctrl_dumppolicy_put_op(struct sk_buff *skb,
struct netlink_callback *cb,
- struct genl_ops *op)
+ struct genl_split_ops *doit,
+ struct genl_split_ops *dumpit)
{
struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
struct nlattr *nest_pol, *nest_op;
@@ -1273,10 +1516,7 @@ static int ctrl_dumppolicy_put_op(struct sk_buff *skb,
int idx;
/* skip if we have nothing to show */
- if (!op->policy)
- return 0;
- if (!op->doit &&
- (!op->dumpit || op->validate & GENL_DONT_VALIDATE_DUMP))
+ if (!doit->policy && !dumpit->policy)
return 0;
hdr = ctrl_dumppolicy_prep(skb, cb);
@@ -1287,21 +1527,26 @@ static int ctrl_dumppolicy_put_op(struct sk_buff *skb,
if (!nest_pol)
goto err;
- nest_op = nla_nest_start(skb, op->cmd);
+ nest_op = nla_nest_start(skb, doit->cmd);
if (!nest_op)
goto err;
- /* for now both do/dump are always the same */
- idx = netlink_policy_dump_get_policy_idx(ctx->state,
- op->policy,
- op->maxattr);
+ if (doit->policy) {
+ idx = netlink_policy_dump_get_policy_idx(ctx->state,
+ doit->policy,
+ doit->maxattr);
- if (op->doit && nla_put_u32(skb, CTRL_ATTR_POLICY_DO, idx))
- goto err;
+ if (nla_put_u32(skb, CTRL_ATTR_POLICY_DO, idx))
+ goto err;
+ }
+ if (dumpit->policy) {
+ idx = netlink_policy_dump_get_policy_idx(ctx->state,
+ dumpit->policy,
+ dumpit->maxattr);
- if (op->dumpit && !(op->validate & GENL_DONT_VALIDATE_DUMP) &&
- nla_put_u32(skb, CTRL_ATTR_POLICY_DUMP, idx))
- goto err;
+ if (nla_put_u32(skb, CTRL_ATTR_POLICY_DUMP, idx))
+ goto err;
+ }
nla_nest_end(skb, nest_op);
nla_nest_end(skb, nest_pol);
@@ -1318,31 +1563,29 @@ static int ctrl_dumppolicy(struct sk_buff *skb, struct netlink_callback *cb)
struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
void *hdr;
- if (!ctx->policies) {
- while (ctx->opidx < genl_get_cmd_cnt(ctx->rt)) {
- struct genl_ops op;
+ if (ctx->dump_map) {
+ if (ctx->single_op) {
+ struct genl_split_ops doit, dumpit;
- if (ctx->single_op) {
- int err;
+ if (WARN_ON(genl_get_cmd_both(ctx->op, ctx->rt,
+ &doit, &dumpit)))
+ return -ENOENT;
- err = genl_get_cmd(ctx->op, ctx->rt, &op);
- if (WARN_ON(err))
- return skb->len;
+ if (ctrl_dumppolicy_put_op(skb, cb, &doit, &dumpit))
+ return skb->len;
- /* break out of the loop after this one */
- ctx->opidx = genl_get_cmd_cnt(ctx->rt);
- } else {
- genl_get_cmd_by_index(ctx->opidx, ctx->rt, &op);
- }
+ /* done with the per-op policy index list */
+ ctx->dump_map = 0;
+ }
- if (ctrl_dumppolicy_put_op(skb, cb, &op))
+ while (ctx->dump_map) {
+ if (ctrl_dumppolicy_put_op(skb, cb,
+ &ctx->op_iter->doit,
+ &ctx->op_iter->dumpit))
return skb->len;
- ctx->opidx++;
+ ctx->dump_map = genl_op_iter_next(ctx->op_iter);
}
-
- /* completed with the per-op policy index list */
- ctx->policies = true;
}
while (netlink_policy_dump_loop(ctx->state)) {
@@ -1375,18 +1618,27 @@ static int ctrl_dumppolicy_done(struct netlink_callback *cb)
{
struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
+ kfree(ctx->op_iter);
netlink_policy_dump_free(ctx->state);
return 0;
}
-static const struct genl_ops genl_ctrl_ops[] = {
+static const struct genl_split_ops genl_ctrl_ops[] = {
{
.cmd = CTRL_CMD_GETFAMILY,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .validate = GENL_DONT_VALIDATE_STRICT,
.policy = ctrl_policy_family,
.maxattr = ARRAY_SIZE(ctrl_policy_family) - 1,
.doit = ctrl_getfamily,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = CTRL_CMD_GETFAMILY,
+ .validate = GENL_DONT_VALIDATE_DUMP,
+ .policy = ctrl_policy_family,
+ .maxattr = ARRAY_SIZE(ctrl_policy_family) - 1,
.dumpit = ctrl_dumpfamily,
+ .flags = GENL_CMD_CAP_DUMP,
},
{
.cmd = CTRL_CMD_GETPOLICY,
@@ -1395,6 +1647,7 @@ static const struct genl_ops genl_ctrl_ops[] = {
.start = ctrl_dumppolicy_start,
.dumpit = ctrl_dumppolicy,
.done = ctrl_dumppolicy_done,
+ .flags = GENL_CMD_CAP_DUMP,
},
};
@@ -1404,8 +1657,8 @@ static const struct genl_multicast_group genl_ctrl_groups[] = {
static struct genl_family genl_ctrl __ro_after_init = {
.module = THIS_MODULE,
- .ops = genl_ctrl_ops,
- .n_ops = ARRAY_SIZE(genl_ctrl_ops),
+ .split_ops = genl_ctrl_ops,
+ .n_split_ops = ARRAY_SIZE(genl_ctrl_ops),
.resv_start_op = CTRL_CMD_GETPOLICY + 1,
.mcgrps = genl_ctrl_groups,
.n_mcgrps = ARRAY_SIZE(genl_ctrl_groups),
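
The controller itself (genl_ctrl_ops above) is the first user of the split-ops representation: one array entry per cmd/capability pair, sorted by cmd, with the GENL_CMD_CAP_DO entry preceding the GENL_CMD_CAP_DUMP one, as enforced by genl_validate_ops(). A hedged sketch of what a hypothetical family converted to split ops could look like; the "foobar" names, constants and handlers are all made up.

#include <net/genetlink.h>

/* Hypothetical family "foobar" with one command exposing both a doit and
 * a dumpit, each with its own policy and validation flags.  Sketch only.
 */
static const struct genl_split_ops foobar_ops[] = {
	{
		.cmd		= FOOBAR_CMD_GET,	/* assumed constant */
		.policy		= foobar_get_policy,	/* assumed policy */
		.maxattr	= FOOBAR_ATTR_MAX,	/* assumed constant */
		.doit		= foobar_get_doit,	/* assumed handler */
		.flags		= GENL_CMD_CAP_DO,
	},
	{
		.cmd		= FOOBAR_CMD_GET,
		.validate	= GENL_DONT_VALIDATE_DUMP,
		.dumpit		= foobar_get_dumpit,	/* assumed handler */
		.flags		= GENL_CMD_CAP_DUMP,
	},
};

static struct genl_family foobar_family __ro_after_init = {
	.name		= "foobar",
	.version	= 1,
	.module		= THIS_MODULE,
	.split_ops	= foobar_ops,
	.n_split_ops	= ARRAY_SIZE(foobar_ops),
	.resv_start_op	= FOOBAR_CMD_GET + 1,
};
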
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 6a193cce2a75..dbe5258e13ff 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -24,6 +24,7 @@
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/skbuff.h>
+#include <linux/kcov.h>
#include "../nfc.h"
#include <net/nfc/nci.h>
@@ -1472,6 +1473,7 @@ static void nci_tx_work(struct work_struct *work)
skb = skb_dequeue(&ndev->tx_q);
if (!skb)
return;
+ kcov_remote_start_common(skb_get_kcov_handle(skb));
/* Check if data flow control is used */
if (atomic_read(&conn_info->credits_cnt) !=
@@ -1487,6 +1489,7 @@ static void nci_tx_work(struct work_struct *work)
mod_timer(&ndev->data_timer,
jiffies + msecs_to_jiffies(NCI_DATA_TIMEOUT));
+ kcov_remote_stop();
}
}
@@ -1497,7 +1500,8 @@ static void nci_rx_work(struct work_struct *work)
struct nci_dev *ndev = container_of(work, struct nci_dev, rx_work);
struct sk_buff *skb;
- while ((skb = skb_dequeue(&ndev->rx_q))) {
+ for (; (skb = skb_dequeue(&ndev->rx_q)); kcov_remote_stop()) {
+ kcov_remote_start_common(skb_get_kcov_handle(skb));
/* Send copy to sniffer */
nfc_send_to_raw_sock(ndev->nfc_dev, skb,
@@ -1551,6 +1555,7 @@ static void nci_cmd_work(struct work_struct *work)
if (!skb)
return;
+ kcov_remote_start_common(skb_get_kcov_handle(skb));
atomic_dec(&ndev->cmd_cnt);
pr_debug("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n",
@@ -1563,6 +1568,7 @@ static void nci_cmd_work(struct work_struct *work)
mod_timer(&ndev->cmd_timer,
jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
+ kcov_remote_stop();
}
}
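
The kcov annotations above (repeated in nci/hci.c and nfc/rawsock.c below) bracket each dequeued skb so that coverage collected in the worker is attributed back to the remote handle of the context that queued it. A minimal sketch of the same bracketing in a hypothetical skb work handler follows; struct example_dev and example_process_skb() are made-up names, only the kcov/skb helpers are real.

#include <linux/kcov.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>

struct example_dev {
	struct work_struct rx_work;
	struct sk_buff_head rx_q;
};

static void example_process_skb(struct example_dev *dev, struct sk_buff *skb)
{
	consume_skb(skb);	/* placeholder for real processing */
}

/* Hypothetical deferred-work handler: attribute coverage of each skb to
 * the context that queued it.  Sketch of the pattern, not driver code.
 */
static void example_rx_work(struct work_struct *work)
{
	struct example_dev *dev = container_of(work, struct example_dev, rx_work);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&dev->rx_q))) {
		kcov_remote_start_common(skb_get_kcov_handle(skb));
		example_process_skb(dev, skb);
		kcov_remote_stop();
	}
}
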
diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
index 78c4b6addf15..de175318a3a0 100644
--- a/net/nfc/nci/hci.c
+++ b/net/nfc/nci/hci.c
@@ -14,6 +14,7 @@
#include <net/nfc/nci.h>
#include <net/nfc/nci_core.h>
#include <linux/nfc.h>
+#include <linux/kcov.h>
struct nci_data {
u8 conn_id;
@@ -409,7 +410,8 @@ static void nci_hci_msg_rx_work(struct work_struct *work)
const struct nci_hcp_message *message;
u8 pipe, type, instruction;
- while ((skb = skb_dequeue(&hdev->msg_rx_queue)) != NULL) {
+ for (; (skb = skb_dequeue(&hdev->msg_rx_queue)); kcov_remote_stop()) {
+ kcov_remote_start_common(skb_get_kcov_handle(skb));
pipe = NCI_HCP_MSG_GET_PIPE(skb->data[0]);
skb_pull(skb, NCI_HCI_HCP_PACKET_HEADER_LEN);
message = (struct nci_hcp_message *)skb->data;
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index 8dd569765f96..5125392bb68e 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -12,6 +12,7 @@
#include <net/tcp_states.h>
#include <linux/nfc.h>
#include <linux/export.h>
+#include <linux/kcov.h>
#include "nfc.h"
@@ -189,6 +190,7 @@ static void rawsock_tx_work(struct work_struct *work)
}
skb = skb_dequeue(&sk->sk_write_queue);
+ kcov_remote_start_common(skb_get_kcov_handle(skb));
sock_hold(sk);
rc = nfc_data_exchange(dev, target_idx, skb,
@@ -197,6 +199,7 @@ static void rawsock_tx_work(struct work_struct *work)
rawsock_report_error(sk, rc);
sock_put(sk);
}
+ kcov_remote_stop();
}
static int rawsock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index c7b10234cf7c..4348321856af 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -434,65 +434,6 @@ static int ovs_ct_set_labels(struct nf_conn *ct, struct sw_flow_key *key,
return 0;
}
-/* 'skb' should already be pulled to nh_ofs. */
-static int ovs_ct_helper(struct sk_buff *skb, u16 proto)
-{
- const struct nf_conntrack_helper *helper;
- const struct nf_conn_help *help;
- enum ip_conntrack_info ctinfo;
- unsigned int protoff;
- struct nf_conn *ct;
- int err;
-
- ct = nf_ct_get(skb, &ctinfo);
- if (!ct || ctinfo == IP_CT_RELATED_REPLY)
- return NF_ACCEPT;
-
- help = nfct_help(ct);
- if (!help)
- return NF_ACCEPT;
-
- helper = rcu_dereference(help->helper);
- if (!helper)
- return NF_ACCEPT;
-
- switch (proto) {
- case NFPROTO_IPV4:
- protoff = ip_hdrlen(skb);
- break;
- case NFPROTO_IPV6: {
- u8 nexthdr = ipv6_hdr(skb)->nexthdr;
- __be16 frag_off;
- int ofs;
-
- ofs = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
- &frag_off);
- if (ofs < 0 || (frag_off & htons(~0x7)) != 0) {
- pr_debug("proto header not found\n");
- return NF_ACCEPT;
- }
- protoff = ofs;
- break;
- }
- default:
- WARN_ONCE(1, "helper invoked on non-IP family!");
- return NF_DROP;
- }
-
- err = helper->help(skb, protoff, ct, ctinfo);
- if (err != NF_ACCEPT)
- return err;
-
- /* Adjust seqs after helper. This is needed due to some helpers (e.g.,
- * FTP with NAT) adusting the TCP payload size when mangling IP
- * addresses and/or port numbers in the text-based control connection.
- */
- if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
- !nf_ct_seq_adjust(skb, ct, ctinfo, protoff))
- return NF_DROP;
- return NF_ACCEPT;
-}
-
/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
* value if 'skb' is freed.
*/
@@ -1038,7 +979,7 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
*/
if ((nf_ct_is_confirmed(ct) ? !cached || add_helper :
info->commit) &&
- ovs_ct_helper(skb, info->family) != NF_ACCEPT) {
+ nf_ct_helper(skb, ct, ctinfo, info->family) != NF_ACCEPT) {
return -EINVAL;
}
@@ -1350,43 +1291,6 @@ int ovs_ct_clear(struct sk_buff *skb, struct sw_flow_key *key)
return 0;
}
-static int ovs_ct_add_helper(struct ovs_conntrack_info *info, const char *name,
- const struct sw_flow_key *key, bool log)
-{
- struct nf_conntrack_helper *helper;
- struct nf_conn_help *help;
- int ret = 0;
-
- helper = nf_conntrack_helper_try_module_get(name, info->family,
- key->ip.proto);
- if (!helper) {
- OVS_NLERR(log, "Unknown helper \"%s\"", name);
- return -EINVAL;
- }
-
- help = nf_ct_helper_ext_add(info->ct, GFP_KERNEL);
- if (!help) {
- nf_conntrack_helper_put(helper);
- return -ENOMEM;
- }
-
-#if IS_ENABLED(CONFIG_NF_NAT)
- if (info->nat) {
- ret = nf_nat_helper_try_module_get(name, info->family,
- key->ip.proto);
- if (ret) {
- nf_conntrack_helper_put(helper);
- OVS_NLERR(log, "Failed to load \"%s\" NAT helper, error: %d",
- name, ret);
- return ret;
- }
- }
-#endif
- rcu_assign_pointer(help->helper, helper);
- info->helper = helper;
- return ret;
-}
-
#if IS_ENABLED(CONFIG_NF_NAT)
static int parse_nat(const struct nlattr *attr,
struct ovs_conntrack_info *info, bool log)
@@ -1720,9 +1624,12 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
}
if (helper) {
- err = ovs_ct_add_helper(&ct_info, helper, key, log);
- if (err)
+ err = nf_ct_add_helper(ct_info.ct, helper, ct_info.family,
+ key->ip.proto, ct_info.nat, &ct_info.helper);
+ if (err) {
+ OVS_NLERR(log, "Failed to add %s helper %d", helper, err);
goto err_free_ct;
+ }
}
err = ovs_nla_add_action(sfa, OVS_ACTION_ATTR_CT, &ct_info,
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 8b84869eb2ac..861dfb8daf4a 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -716,9 +716,9 @@ static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
do {
- start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
+ start = u64_stats_fetch_begin(&percpu_stats->syncp);
local_stats = *percpu_stats;
- } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&percpu_stats->syncp, start));
stats->n_hit += local_stats.n_hit;
stats->n_missed += local_stats.n_missed;
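As an aside, a hedged sketch of the reader side of the u64_stats seqcount loop that this and the following hunks switch to the non-_irq variants. The per-CPU stats struct and its fields are illustrative; the u64_stats_* helpers are the real API.

#include <linux/u64_stats_sync.h>

struct example_pcpu_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void example_read_stats(const struct example_pcpu_stats *s,
			       u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		/* Snapshot both counters; retry if a writer raced with us. */
		start = u64_stats_fetch_begin(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}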
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 4a07ab094a84..ead5418c126e 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -2309,7 +2309,7 @@ static struct sw_flow_actions *nla_alloc_flow_actions(int size)
WARN_ON_ONCE(size > MAX_ACTIONS_BUFSIZE);
- sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
+ sfa = kmalloc(kmalloc_size_roundup(sizeof(*sfa) + size), GFP_KERNEL);
if (!sfa)
return ERR_PTR(-ENOMEM);
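For reference, a hedged sketch (not from the patch) of the idea behind kmalloc_size_roundup(): ask the slab allocator how large the returned object will really be and request exactly that size, so later accounting can use the rounded value instead of probing with ksize(). The wrapper struct and its fields are invented for illustration.

#include <linux/slab.h>

struct example_buf {
	size_t size;	/* true usable size of data */
	void *data;
};

static struct example_buf *example_buf_alloc(size_t payload, gfp_t gfp)
{
	struct example_buf *buf = kmalloc(sizeof(*buf), gfp);

	if (!buf)
		return NULL;

	/* Round the request up to the slab bucket kmalloc() will use. */
	buf->size = kmalloc_size_roundup(payload);
	buf->data = kmalloc(buf->size, gfp);
	if (!buf->data) {
		kfree(buf);
		return NULL;
	}
	return buf;
}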
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index d4a2db0b2299..0a0e4c283f02 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -205,9 +205,9 @@ static void tbl_mask_array_reset_counters(struct mask_array *ma)
stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
+ start = u64_stats_fetch_begin(&stats->syncp);
counter = stats->usage_cntrs[i];
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
ma->masks_usage_zero_cntr[i] += counter;
}
@@ -1136,10 +1136,9 @@ void ovs_flow_masks_rebalance(struct flow_table *table)
stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
+ start = u64_stats_fetch_begin(&stats->syncp);
counter = stats->usage_cntrs[i];
- } while (u64_stats_fetch_retry_irq(&stats->syncp,
- start));
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
masks_and_count[i].counter += counter;
}
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c
index 89a8e1501809..b10e1602c6b1 100644
--- a/net/openvswitch/vport-geneve.c
+++ b/net/openvswitch/vport-geneve.c
@@ -91,7 +91,7 @@ static struct vport *geneve_tnl_create(const struct vport_parms *parms)
err = dev_change_flags(dev, dev->flags | IFF_UP, NULL);
if (err < 0) {
- rtnl_delete_link(dev);
+ rtnl_delete_link(dev, 0, NULL);
rtnl_unlock();
ovs_vport_free(vport);
goto error;
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index e6b5e76a962a..4014c9b5eb79 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -57,7 +57,7 @@ static struct vport *gre_tnl_create(const struct vport_parms *parms)
err = dev_change_flags(dev, dev->flags | IFF_UP, NULL);
if (err < 0) {
- rtnl_delete_link(dev);
+ rtnl_delete_link(dev, 0, NULL);
rtnl_unlock();
ovs_vport_free(vport);
return ERR_PTR(err);
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index 2f61d5bdce1a..903537a5da22 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -172,7 +172,7 @@ void ovs_netdev_tunnel_destroy(struct vport *vport)
* if it's not already shutting down.
*/
if (vport->dev->reg_state == NETREG_REGISTERED)
- rtnl_delete_link(vport->dev);
+ rtnl_delete_link(vport->dev, 0, NULL);
netdev_put(vport->dev, &vport->dev_tracker);
vport->dev = NULL;
rtnl_unlock();
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index 188e9c1360a1..0b881b043bcf 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -120,7 +120,7 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
err = dev_change_flags(dev, dev->flags | IFF_UP, NULL);
if (err < 0) {
- rtnl_delete_link(dev);
+ rtnl_delete_link(dev, 0, NULL);
rtnl_unlock();
ovs_vport_free(vport);
goto error;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 6ce8dd19f33c..44f20cf8a0c0 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1777,6 +1777,7 @@ static int fanout_add(struct sock *sk, struct fanout_args *args)
match->prot_hook.af_packet_net = read_pnet(&match->net);
match->prot_hook.id_match = match_fanout_group;
match->max_num_members = args->max_num_members;
+ match->prot_hook.ignore_outgoing = type_flags & PACKET_FANOUT_FLAG_IGNORE_OUTGOING;
list_add(&match->list, &fanout_list);
}
err = -EINVAL;
@@ -3277,7 +3278,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
int addr_len)
{
struct sock *sk = sock->sk;
- char name[sizeof(uaddr->sa_data) + 1];
+ char name[sizeof(uaddr->sa_data_min) + 1];
/*
* Check legality
@@ -3288,8 +3289,8 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
/* uaddr->sa_data comes from the userspace, it's not guaranteed to be
* zero-terminated.
*/
- memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
- name[sizeof(uaddr->sa_data)] = 0;
+ memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data_min));
+ name[sizeof(uaddr->sa_data_min)] = 0;
return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
}
@@ -3561,11 +3562,11 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
return -EOPNOTSUPP;
uaddr->sa_family = AF_PACKET;
- memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
+ memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data_min));
rcu_read_lock();
dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex));
if (dev)
- strscpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
+ strscpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data_min));
rcu_read_unlock();
return sizeof(*uaddr);
diff --git a/net/rds/message.c b/net/rds/message.c
index 44dbc612ef54..b47e4f0a1639 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -366,7 +366,6 @@ static int rds_message_zcopy_from_user(struct rds_message *rm, struct iov_iter *
struct scatterlist *sg;
int ret = 0;
int length = iov_iter_count(from);
- int total_copied = 0;
struct rds_msg_zcopy_info *info;
rm->m_inc.i_hdr.h_len = cpu_to_be32(iov_iter_count(from));
@@ -404,7 +403,6 @@ static int rds_message_zcopy_from_user(struct rds_message *rm, struct iov_iter *
ret = -EFAULT;
goto err;
}
- total_copied += copied;
length -= copied;
sg_set_page(sg, pages, copied, start);
rm->data.op_nents++;
diff --git a/net/rds/send.c b/net/rds/send.c
index 0c5504068e3c..5e57a1581dc6 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -1114,7 +1114,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
struct rds_conn_path *cpath;
struct in6_addr daddr;
__u32 scope_id = 0;
- size_t total_payload_len = payload_len, rdma_payload_len = 0;
+ size_t rdma_payload_len = 0;
bool zcopy = ((msg->msg_flags & MSG_ZEROCOPY) &&
sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY));
int num_sgs = DIV_ROUND_UP(payload_len, PAGE_SIZE);
@@ -1243,7 +1243,6 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
if (ret)
goto out;
- total_payload_len += rdma_payload_len;
if (max_t(size_t, payload_len, rdma_payload_len) > RDS_MAX_MSG_SIZE) {
ret = -EMSGSIZE;
goto out;
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 4444fd82b66d..c5b86066ff66 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -503,6 +503,9 @@ bool rds_tcp_tune(struct socket *sock)
release_sock(sk);
return false;
}
+ /* Update ns_tracker to current stack trace and refcounted tracker */
+ __netns_tracker_free(net, &sk->ns_tracker, false);
+
sk->sk_net_refcnt = 1;
netns_tracker_alloc(net, &sk->ns_tracker, GFP_KERNEL);
sock_inuse_add(net, 1);
diff --git a/net/rxrpc/Makefile b/net/rxrpc/Makefile
index b11281bed2a4..fdeba488fc6e 100644
--- a/net/rxrpc/Makefile
+++ b/net/rxrpc/Makefile
@@ -30,6 +30,7 @@ rxrpc-y := \
sendmsg.o \
server_key.o \
skbuff.o \
+ txbuf.o \
utils.o
rxrpc-$(CONFIG_PROC_FS) += proc.o
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index ceba28e9dce6..2f3991cf8715 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -39,7 +39,7 @@ atomic_t rxrpc_debug_id;
EXPORT_SYMBOL(rxrpc_debug_id);
/* count of skbs currently in use */
-atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;
+atomic_t rxrpc_n_rx_skbs;
struct workqueue_struct *rxrpc_workqueue;
@@ -979,7 +979,7 @@ static int __init af_rxrpc_init(void)
goto error_call_jar;
}
- rxrpc_workqueue = alloc_workqueue("krxrpcd", 0, 1);
+ rxrpc_workqueue = alloc_workqueue("krxrpcd", WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
if (!rxrpc_workqueue) {
pr_notice("Failed to allocate work queue\n");
goto error_work_queue;
@@ -1059,7 +1059,6 @@ static void __exit af_rxrpc_exit(void)
sock_unregister(PF_RXRPC);
proto_unregister(&rxrpc_proto);
unregister_pernet_device(&rxrpc_net_ops);
- ASSERTCMP(atomic_read(&rxrpc_n_tx_skbs), ==, 0);
ASSERTCMP(atomic_read(&rxrpc_n_rx_skbs), ==, 0);
/* Make sure the local and peer records pinned by any dying connections
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 1ad0ec5afb50..0273a9029229 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -29,6 +29,7 @@ struct rxrpc_crypt {
struct key_preparsed_payload;
struct rxrpc_connection;
+struct rxrpc_txbuf;
/*
* Mark applied to socket buffers in skb->mark. skb->priority is used
@@ -93,6 +94,22 @@ struct rxrpc_net {
struct list_head peer_keepalive_new;
struct timer_list peer_keepalive_timer;
struct work_struct peer_keepalive_work;
+
+ atomic_t stat_tx_data;
+ atomic_t stat_tx_data_retrans;
+ atomic_t stat_tx_data_send;
+ atomic_t stat_tx_data_send_frag;
+ atomic_t stat_rx_data;
+ atomic_t stat_rx_data_reqack;
+ atomic_t stat_rx_data_jumbo;
+
+ atomic_t stat_tx_ack_fill;
+ atomic_t stat_tx_ack_send;
+ atomic_t stat_tx_ack_skip;
+ atomic_t stat_tx_acks[256];
+ atomic_t stat_rx_acks[256];
+
+ atomic_t stat_why_req_ack[8];
};
/*
@@ -178,20 +195,12 @@ struct rxrpc_host_header {
* - max 48 bytes (struct sk_buff::cb)
*/
struct rxrpc_skb_priv {
- atomic_t nr_ring_pins; /* Number of rxtx ring pins */
- u8 nr_subpackets; /* Number of subpackets */
- u8 rx_flags; /* Received packet flags */
-#define RXRPC_SKB_INCL_LAST 0x01 /* - Includes last packet */
-#define RXRPC_SKB_TX_BUFFER 0x02 /* - Is transmit buffer */
- union {
- int remain; /* amount of space remaining for next write */
-
- /* List of requested ACKs on subpackets */
- unsigned long rx_req_ack[(RXRPC_MAX_NR_JUMBO + BITS_PER_LONG - 1) /
- BITS_PER_LONG];
- };
+ u16 offset; /* Offset of data */
+ u16 len; /* Length of data */
+ u8 flags;
+#define RXRPC_RX_VERIFIED 0x01
- struct rxrpc_host_header hdr; /* RxRPC packet header from this packet */
+ struct rxrpc_host_header hdr; /* RxRPC packet header from this packet */
};
#define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb)
@@ -233,19 +242,14 @@ struct rxrpc_security {
size_t *, size_t *, size_t *);
/* impose security on a packet */
- int (*secure_packet)(struct rxrpc_call *, struct sk_buff *, size_t);
+ int (*secure_packet)(struct rxrpc_call *, struct rxrpc_txbuf *);
/* verify the security on a received packet */
- int (*verify_packet)(struct rxrpc_call *, struct sk_buff *,
- unsigned int, unsigned int, rxrpc_seq_t, u16);
+ int (*verify_packet)(struct rxrpc_call *, struct sk_buff *);
/* Free crypto request on a call */
void (*free_call_crypto)(struct rxrpc_call *);
- /* Locate the data in a received packet that has been verified. */
- void (*locate_data)(struct rxrpc_call *, struct sk_buff *,
- unsigned int *, unsigned int *);
-
/* issue a challenge */
int (*issue_challenge)(struct rxrpc_connection *);
@@ -276,6 +280,8 @@ struct rxrpc_local {
struct hlist_node link;
struct socket *socket; /* my UDP socket */
struct work_struct processor;
+ struct list_head ack_tx_queue; /* List of ACKs that need sending */
+ spinlock_t ack_tx_lock; /* ACK list lock */
struct rxrpc_sock __rcu *service; /* Service(s) listening on this endpoint */
struct rw_semaphore defrag_sem; /* control re-enablement of IP DF bit */
struct sk_buff_head reject_queue; /* packets awaiting rejection */
@@ -326,7 +332,7 @@ struct rxrpc_peer {
u32 rto_j; /* Retransmission timeout in jiffies */
u8 backoff; /* Backoff timeout */
- u8 cong_cwnd; /* Congestion window size */
+ u8 cong_ssthresh; /* Congestion slow-start threshold */
};
/*
@@ -490,6 +496,7 @@ enum rxrpc_call_flag {
RXRPC_CALL_EXPOSED, /* The call was exposed to the world */
RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */
RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at rxtx_top) */
+ RXRPC_CALL_TX_ALL_ACKED, /* Last packet has been hard-acked */
RXRPC_CALL_SEND_PING, /* A ping will need to be sent */
RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */
RXRPC_CALL_BEGAN_RX_TIMER, /* We began the expect_rx_by timer */
@@ -498,16 +505,16 @@ enum rxrpc_call_flag {
RXRPC_CALL_DISCONNECTED, /* The call has been disconnected */
RXRPC_CALL_KERNEL, /* The call was made by the kernel */
RXRPC_CALL_UPGRADE, /* Service upgrade was requested for the call */
+ RXRPC_CALL_DELAY_ACK_PENDING, /* DELAY ACK generation is pending */
+ RXRPC_CALL_IDLE_ACK_PENDING, /* IDLE ACK generation is pending */
};
/*
* Events that can be raised on a call.
*/
enum rxrpc_call_event {
- RXRPC_CALL_EV_ACK, /* need to generate ACK */
RXRPC_CALL_EV_ABORT, /* need to generate abort */
RXRPC_CALL_EV_RESEND, /* Tx resend required */
- RXRPC_CALL_EV_PING, /* Ping send required */
RXRPC_CALL_EV_EXPIRED, /* Expiry occurred */
RXRPC_CALL_EV_ACK_LOST, /* ACK may be lost, send ping */
};
@@ -566,7 +573,7 @@ struct rxrpc_call {
struct rxrpc_net *rxnet; /* Network namespace to which call belongs */
const struct rxrpc_security *security; /* applied security module */
struct mutex user_mutex; /* User access mutex */
- unsigned long ack_at; /* When deferred ACK needs to happen */
+ unsigned long delay_ack_at; /* When DELAY ACK needs to happen */
unsigned long ack_lost_at; /* When ACK is figured as lost */
unsigned long resend_at; /* When next resend needs to happen */
unsigned long ping_at; /* When next to send a ping */
@@ -576,7 +583,6 @@ struct rxrpc_call {
unsigned long expect_term_by; /* When we expect call termination by */
u32 next_rx_timo; /* Timeout for next Rx packet (jif) */
u32 next_req_timo; /* Timeout for next Rx request packet (jif) */
- struct skcipher_request *cipher_req; /* Packet cipher request buffer */
struct timer_list timer; /* Combined event timer */
struct work_struct processor; /* Event processor */
rxrpc_notify_rx_t notify_rx; /* kernel service Rx notification function */
@@ -587,14 +593,12 @@ struct rxrpc_call {
struct list_head recvmsg_link; /* Link in rx->recvmsg_q */
struct list_head sock_link; /* Link in rx->sock_calls */
struct rb_node sock_node; /* Node in rx->calls */
- struct sk_buff *tx_pending; /* Tx socket buffer being filled */
+ struct rxrpc_txbuf *tx_pending; /* Tx buffer being filled */
wait_queue_head_t waitq; /* Wait queue for channel or Tx */
s64 tx_total_len; /* Total length left to be transmitted (or -1) */
- __be32 crypto_buf[2]; /* Temporary packet crypto buffer */
unsigned long user_call_ID; /* user-defined call ID */
unsigned long flags;
unsigned long events;
- spinlock_t lock;
spinlock_t notify_lock; /* Kernel notification lock */
rwlock_t state_lock; /* lock for state transition */
u32 abort_code; /* Local/remote abort code */
@@ -610,37 +614,27 @@ struct rxrpc_call {
int debug_id; /* debug ID for printks */
unsigned short rx_pkt_offset; /* Current recvmsg packet offset */
unsigned short rx_pkt_len; /* Current recvmsg packet len */
- bool rx_pkt_last; /* Current recvmsg packet is last */
-
- /* Rx/Tx circular buffer, depending on phase.
- *
- * In the Rx phase, packets are annotated with 0 or the number of the
- * segment of a jumbo packet each buffer refers to. There can be up to
- * 47 segments in a maximum-size UDP packet.
- *
- * In the Tx phase, packets are annotated with which buffers have been
- * acked.
- */
-#define RXRPC_RXTX_BUFF_SIZE 64
-#define RXRPC_RXTX_BUFF_MASK (RXRPC_RXTX_BUFF_SIZE - 1)
-#define RXRPC_INIT_RX_WINDOW_SIZE 63
- struct sk_buff **rxtx_buffer;
- u8 *rxtx_annotations;
-#define RXRPC_TX_ANNO_ACK 0
-#define RXRPC_TX_ANNO_UNACK 1
-#define RXRPC_TX_ANNO_NAK 2
-#define RXRPC_TX_ANNO_RETRANS 3
-#define RXRPC_TX_ANNO_MASK 0x03
-#define RXRPC_TX_ANNO_LAST 0x04
-#define RXRPC_TX_ANNO_RESENT 0x08
-
-#define RXRPC_RX_ANNO_SUBPACKET 0x3f /* Subpacket number in jumbogram */
-#define RXRPC_RX_ANNO_VERIFIED 0x80 /* Set if verified and decrypted */
- rxrpc_seq_t tx_hard_ack; /* Dead slot in buffer; the first transmitted but
- * not hard-ACK'd packet follows this.
- */
+
+ /* Transmitted data tracking. */
+ spinlock_t tx_lock; /* Transmit queue lock */
+ struct list_head tx_buffer; /* Buffer of transmissible packets */
+ rxrpc_seq_t tx_bottom; /* First packet in buffer */
+ rxrpc_seq_t tx_transmitted; /* Highest packet transmitted */
rxrpc_seq_t tx_top; /* Highest Tx slot allocated. */
u16 tx_backoff; /* Delay to insert due to Tx failure */
+ u8 tx_winsize; /* Maximum size of Tx window */
+#define RXRPC_TX_MAX_WINDOW 128
+ ktime_t tx_last_sent; /* Last time a transmission occurred */
+
+ /* Received data tracking */
+ struct sk_buff_head recvmsg_queue; /* Queue of packets ready for recvmsg() */
+ struct sk_buff_head rx_oos_queue; /* Queue of out of sequence packets */
+
+ rxrpc_seq_t rx_highest_seq; /* Highest sequence number received */
+ rxrpc_seq_t rx_consumed; /* Highest packet consumed */
+ rxrpc_serial_t rx_serial; /* Highest serial received for this call */
+ u8 rx_winsize; /* Size of Rx window */
+ spinlock_t input_lock; /* Lock for packet input to this call */
/* TCP-style slow-start congestion control [RFC5681]. Since the SMSS
* is fixed, we keep these numbers in terms of segments (ie. DATA
@@ -655,25 +649,17 @@ struct rxrpc_call {
u8 cong_cumul_acks; /* Cumulative ACK count */
ktime_t cong_tstamp; /* Last time cwnd was changed */
- rxrpc_seq_t rx_hard_ack; /* Dead slot in buffer; the first received but not
- * consumed packet follows this.
- */
- rxrpc_seq_t rx_top; /* Highest Rx slot allocated. */
- rxrpc_seq_t rx_expect_next; /* Expected next packet sequence number */
- rxrpc_serial_t rx_serial; /* Highest serial received for this call */
- u8 rx_winsize; /* Size of Rx window */
- u8 tx_winsize; /* Maximum size of Tx window */
- bool tx_phase; /* T if transmission phase, F if receive phase */
- u8 nr_jumbo_bad; /* Number of jumbo dups/exceeds-windows */
-
- spinlock_t input_lock; /* Lock for packet input to this call */
-
/* Receive-phase ACK management (ACKs we send). */
u8 ackr_reason; /* reason to ACK */
rxrpc_serial_t ackr_serial; /* serial of packet being ACK'd */
- rxrpc_seq_t ackr_highest_seq; /* Higest sequence number received */
+ atomic64_t ackr_window; /* Base (in LSW) and top (in MSW) of SACK window */
atomic_t ackr_nr_unacked; /* Number of unacked packets */
atomic_t ackr_nr_consumed; /* Number of packets needing hard ACK */
+ struct {
+#define RXRPC_SACK_SIZE 256
+ /* SACK table for soft-acked packets */
+ u8 ackr_sack_table[RXRPC_SACK_SIZE];
+ } __aligned(8);
/* RTT management */
rxrpc_serial_t rtt_serial[4]; /* Serial number of DATA or PING sent */
@@ -687,21 +673,24 @@ struct rxrpc_call {
ktime_t acks_latest_ts; /* Timestamp of latest ACK received */
rxrpc_seq_t acks_first_seq; /* first sequence number received */
rxrpc_seq_t acks_prev_seq; /* Highest previousPacket received */
+ rxrpc_seq_t acks_hard_ack; /* Latest hard-ack point */
rxrpc_seq_t acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */
rxrpc_seq_t acks_lost_top; /* tx_top at the time lost-ack ping sent */
rxrpc_serial_t acks_lost_ping; /* Serial number of probe ACK */
+ rxrpc_serial_t acks_highest_serial; /* Highest serial number ACK'd */
+ struct sk_buff *acks_soft_tbl; /* The last ACK packet with NAKs in it */
+ spinlock_t acks_ack_lock; /* Access to ->acks_soft_tbl */
};
/*
* Summary of a new ACK and the changes it made to the Tx buffer packet states.
*/
struct rxrpc_ack_summary {
+ u16 nr_acks; /* Number of ACKs in packet */
+ u16 nr_new_acks; /* Number of new ACKs in packet */
+ u16 nr_rot_new_acks; /* Number of rotated new ACKs */
u8 ack_reason;
- u8 nr_acks; /* Number of ACKs in packet */
- u8 nr_nacks; /* Number of NACKs in packet */
- u8 nr_new_acks; /* Number of new ACKs in packet */
- u8 nr_new_nacks; /* Number of new NACKs in packet */
- u8 nr_rot_new_acks; /* Number of rotated new ACKs */
+ bool saw_nacks; /* Saw NACKs in packet */
bool new_low_nack; /* T if new low NACK found */
bool retrans_timeo; /* T if reTx due to timeout happened */
u8 flight_size; /* Number of unreceived transmissions */
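A hedged sketch of the ackr_window encoding introduced above: the bottom of the receive window sits in the low 32 bits and the top in the high 32 bits of one atomic64, so both values can be published and read together without taking a lock. The function names below are illustrative; atomic64_set_release(), atomic64_read(), lower_32_bits() and upper_32_bits() are the real helpers.

#include <linux/atomic.h>
#include <linux/kernel.h>

static void example_publish_window(atomic64_t *win, u32 bottom, u32 top)
{
	/* Top in the MSW, bottom in the LSW, released in a single store. */
	atomic64_set_release(win, ((u64)top << 32) | bottom);
}

static void example_read_window(const atomic64_t *win, u32 *bottom, u32 *top)
{
	u64 v = atomic64_read(win);

	*bottom = lower_32_bits(v);
	*top = upper_32_bits(v);
}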
@@ -744,12 +733,58 @@ struct rxrpc_send_params {
bool upgrade; /* If the connection is upgradeable */
};
+/*
+ * Buffer of data to be output as a packet.
+ */
+struct rxrpc_txbuf {
+ struct rcu_head rcu;
+ struct list_head call_link; /* Link in call->tx_queue */
+ struct list_head tx_link; /* Link in live Enc queue or Tx queue */
+ struct rxrpc_call *call; /* Call to which this belongs */
+ ktime_t last_sent; /* Time at which last transmitted */
+ refcount_t ref;
+ rxrpc_seq_t seq; /* Sequence number of this packet */
+ unsigned int call_debug_id;
+ unsigned int debug_id;
+ unsigned int len; /* Amount of data in buffer */
+ unsigned int space; /* Remaining data space */
+ unsigned int offset; /* Offset of fill point */
+ unsigned long flags;
+#define RXRPC_TXBUF_LAST 0 /* Set if last packet in Tx phase */
+#define RXRPC_TXBUF_RESENT 1 /* Set if has been resent */
+ u8 /*enum rxrpc_propose_ack_trace*/ ack_why; /* If ack, why */
+ struct {
+ /* The packet for encrypting and DMA'ing. We align it such
+ * that data[] aligns correctly for any crypto blocksize.
+ */
+ u8 pad[64 - sizeof(struct rxrpc_wire_header)];
+ struct rxrpc_wire_header wire; /* Network-ready header */
+ union {
+ u8 data[RXRPC_JUMBO_DATALEN]; /* Data packet */
+ struct {
+ struct rxrpc_ackpacket ack;
+ u8 acks[0];
+ };
+ };
+ } __aligned(64);
+};
+
+static inline bool rxrpc_sending_to_server(const struct rxrpc_txbuf *txb)
+{
+ return txb->wire.flags & RXRPC_CLIENT_INITIATED;
+}
+
+static inline bool rxrpc_sending_to_client(const struct rxrpc_txbuf *txb)
+{
+ return !rxrpc_sending_to_server(txb);
+}
+
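A hedged illustration (not from the patch) of the alignment trick used in rxrpc_txbuf above: pad the header out to the alignment boundary inside a 64-byte-aligned struct so the payload that follows starts on a 64-byte boundary and can be handed to block ciphers or DMA in place. The header layout below is made up.

#include <linux/types.h>

struct example_hdr {
	__be32 seq;
	__be16 type;
	__be16 flags;
};

struct example_pkt {
	struct {
		/* pad + hdr fill exactly 64 bytes, so data[] lands on a
		 * 64-byte boundary within the 64-byte-aligned struct.
		 */
		u8 pad[64 - sizeof(struct example_hdr)];
		struct example_hdr hdr;
		u8 data[1024];
	} __aligned(64);
};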
#include <trace/events/rxrpc.h>
/*
* af_rxrpc.c
*/
-extern atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;
+extern atomic_t rxrpc_n_rx_skbs;
extern struct workqueue_struct *rxrpc_workqueue;
/*
@@ -766,8 +801,12 @@ int rxrpc_user_charge_accept(struct rxrpc_sock *, unsigned long);
/*
* call_event.c
*/
-void rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool, bool,
- enum rxrpc_propose_ack_trace);
+void rxrpc_propose_ping(struct rxrpc_call *call, u32 serial,
+ enum rxrpc_propose_ack_trace why);
+void rxrpc_send_ACK(struct rxrpc_call *, u8, rxrpc_serial_t, enum rxrpc_propose_ack_trace);
+void rxrpc_propose_delay_ACK(struct rxrpc_call *, rxrpc_serial_t,
+ enum rxrpc_propose_ack_trace);
+void rxrpc_shrink_call_tx_buffer(struct rxrpc_call *);
void rxrpc_process_call(struct work_struct *);
void rxrpc_reduce_call_timer(struct rxrpc_call *call,
@@ -949,15 +988,12 @@ static inline bool __rxrpc_use_local(struct rxrpc_local *local)
* misc.c
*/
extern unsigned int rxrpc_max_backlog __read_mostly;
-extern unsigned long rxrpc_requested_ack_delay;
extern unsigned long rxrpc_soft_ack_delay;
extern unsigned long rxrpc_idle_ack_delay;
extern unsigned int rxrpc_rx_window_size;
extern unsigned int rxrpc_rx_mtu;
extern unsigned int rxrpc_rx_jumbo_max;
-extern const s8 rxrpc_ack_priority[];
-
/*
* net_ns.c
*/
@@ -972,16 +1008,15 @@ static inline struct rxrpc_net *rxrpc_net(struct net *net)
/*
* output.c
*/
-int rxrpc_send_ack_packet(struct rxrpc_call *, bool, rxrpc_serial_t *);
+void rxrpc_transmit_ack_packets(struct rxrpc_local *);
int rxrpc_send_abort_packet(struct rxrpc_call *);
-int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *, bool);
+int rxrpc_send_data_packet(struct rxrpc_call *, struct rxrpc_txbuf *);
void rxrpc_reject_packets(struct rxrpc_local *);
void rxrpc_send_keepalive(struct rxrpc_peer *);
/*
* peer_event.c
*/
-void rxrpc_encap_err_rcv(struct sock *sk, struct sk_buff *skb, unsigned int udp_offset);
void rxrpc_error_report(struct sock *);
void rxrpc_peer_keepalive_worker(struct work_struct *);
@@ -1092,6 +1127,15 @@ void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_purge_queue(struct sk_buff_head *);
/*
+ * stats.c
+ */
+int rxrpc_stats_show(struct seq_file *seq, void *v);
+int rxrpc_stats_clear(struct file *file, char *buf, size_t size);
+
+#define rxrpc_inc_stat(rxnet, s) atomic_inc(&(rxnet)->s)
+#define rxrpc_dec_stat(rxnet, s) atomic_dec(&(rxnet)->s)
+
+/*
* sysctl.c
*/
#ifdef CONFIG_SYSCTL
@@ -1103,6 +1147,16 @@ static inline void rxrpc_sysctl_exit(void) {}
#endif
/*
+ * txbuf.c
+ */
+extern atomic_t rxrpc_nr_txbuf;
+struct rxrpc_txbuf *rxrpc_alloc_txbuf(struct rxrpc_call *call, u8 packet_type,
+ gfp_t gfp);
+void rxrpc_get_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what);
+void rxrpc_see_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what);
+void rxrpc_put_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what);
+
+/*
* utils.c
*/
int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *, struct sk_buff *);
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 99e10eea3732..48790ee77019 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -248,9 +248,8 @@ static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
if (call->peer->rtt_count < 3 ||
ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
- rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
- true, true,
- rxrpc_propose_ack_ping_for_params);
+ rxrpc_send_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
+ rxrpc_propose_ack_ping_for_params);
}
/*
@@ -325,7 +324,8 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
call->security = conn->security;
call->security_ix = conn->security_ix;
call->peer = rxrpc_get_peer(conn->params.peer);
- call->cong_cwnd = call->peer->cong_cwnd;
+ call->cong_ssthresh = call->peer->cong_ssthresh;
+ call->tx_last_sent = ktime_get_real();
return call;
}
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index 2a93e7b5fbd0..1e21a708390e 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -20,127 +20,103 @@
/*
* Propose a PING ACK be sent.
*/
-static void rxrpc_propose_ping(struct rxrpc_call *call,
- bool immediate, bool background)
+void rxrpc_propose_ping(struct rxrpc_call *call, u32 serial,
+ enum rxrpc_propose_ack_trace why)
{
- if (immediate) {
- if (background &&
- !test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
- rxrpc_queue_call(call);
- } else {
- unsigned long now = jiffies;
- unsigned long ping_at = now + rxrpc_idle_ack_delay;
-
- if (time_before(ping_at, call->ping_at)) {
- WRITE_ONCE(call->ping_at, ping_at);
- rxrpc_reduce_call_timer(call, ping_at, now,
- rxrpc_timer_set_for_ping);
- }
+ unsigned long now = jiffies;
+ unsigned long ping_at = now + rxrpc_idle_ack_delay;
+
+ if (time_before(ping_at, call->ping_at)) {
+ WRITE_ONCE(call->ping_at, ping_at);
+ rxrpc_reduce_call_timer(call, ping_at, now,
+ rxrpc_timer_set_for_ping);
+ trace_rxrpc_propose_ack(call, why, RXRPC_ACK_PING, serial);
}
}
/*
- * propose an ACK be sent
+ * Propose a DELAY ACK be sent in the future.
*/
-static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
- u32 serial, bool immediate, bool background,
- enum rxrpc_propose_ack_trace why)
+void rxrpc_propose_delay_ACK(struct rxrpc_call *call, rxrpc_serial_t serial,
+ enum rxrpc_propose_ack_trace why)
{
- enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
unsigned long expiry = rxrpc_soft_ack_delay;
- s8 prior = rxrpc_ack_priority[ack_reason];
-
- /* Pings are handled specially because we don't want to accidentally
- * lose a ping response by subsuming it into a ping.
- */
- if (ack_reason == RXRPC_ACK_PING) {
- rxrpc_propose_ping(call, immediate, background);
- goto trace;
+ unsigned long now = jiffies, ack_at;
+
+ call->ackr_serial = serial;
+
+ if (rxrpc_soft_ack_delay < expiry)
+ expiry = rxrpc_soft_ack_delay;
+ if (call->peer->srtt_us != 0)
+ ack_at = usecs_to_jiffies(call->peer->srtt_us >> 3);
+ else
+ ack_at = expiry;
+
+ ack_at += READ_ONCE(call->tx_backoff);
+ ack_at += now;
+ if (time_before(ack_at, call->delay_ack_at)) {
+ WRITE_ONCE(call->delay_ack_at, ack_at);
+ rxrpc_reduce_call_timer(call, ack_at, now,
+ rxrpc_timer_set_for_ack);
}
- /* Update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
- * numbers, but we don't alter the timeout.
- */
- _debug("prior %u %u vs %u %u",
- ack_reason, prior,
- call->ackr_reason, rxrpc_ack_priority[call->ackr_reason]);
- if (ack_reason == call->ackr_reason) {
- if (RXRPC_ACK_UPDATEABLE & (1 << ack_reason)) {
- outcome = rxrpc_propose_ack_update;
- call->ackr_serial = serial;
- }
- if (!immediate)
- goto trace;
- } else if (prior > rxrpc_ack_priority[call->ackr_reason]) {
- call->ackr_reason = ack_reason;
- call->ackr_serial = serial;
- } else {
- outcome = rxrpc_propose_ack_subsume;
+ trace_rxrpc_propose_ack(call, why, RXRPC_ACK_DELAY, serial);
+}
+
+/*
+ * Queue an ACK for immediate transmission.
+ */
+void rxrpc_send_ACK(struct rxrpc_call *call, u8 ack_reason,
+ rxrpc_serial_t serial, enum rxrpc_propose_ack_trace why)
+{
+ struct rxrpc_local *local = call->conn->params.local;
+ struct rxrpc_txbuf *txb;
+
+ if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
+ return;
+ if (ack_reason == RXRPC_ACK_DELAY &&
+ test_and_set_bit(RXRPC_CALL_DELAY_ACK_PENDING, &call->flags)) {
+ trace_rxrpc_drop_ack(call, why, ack_reason, serial, false);
+ return;
}
- switch (ack_reason) {
- case RXRPC_ACK_REQUESTED:
- if (rxrpc_requested_ack_delay < expiry)
- expiry = rxrpc_requested_ack_delay;
- if (serial == 1)
- immediate = false;
- break;
-
- case RXRPC_ACK_DELAY:
- if (rxrpc_soft_ack_delay < expiry)
- expiry = rxrpc_soft_ack_delay;
- break;
-
- case RXRPC_ACK_IDLE:
- if (rxrpc_idle_ack_delay < expiry)
- expiry = rxrpc_idle_ack_delay;
- break;
-
- default:
- immediate = true;
- break;
+ rxrpc_inc_stat(call->rxnet, stat_tx_acks[ack_reason]);
+
+ txb = rxrpc_alloc_txbuf(call, RXRPC_PACKET_TYPE_ACK,
+ in_softirq() ? GFP_ATOMIC | __GFP_NOWARN : GFP_NOFS);
+ if (!txb) {
+ kleave(" = -ENOMEM");
+ return;
}
- if (test_bit(RXRPC_CALL_EV_ACK, &call->events)) {
- _debug("already scheduled");
- } else if (immediate || expiry == 0) {
- _debug("immediate ACK %lx", call->events);
- if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events) &&
- background)
- rxrpc_queue_call(call);
- } else {
- unsigned long now = jiffies, ack_at;
-
- if (call->peer->srtt_us != 0)
- ack_at = usecs_to_jiffies(call->peer->srtt_us >> 3);
- else
- ack_at = expiry;
-
- ack_at += READ_ONCE(call->tx_backoff);
- ack_at += now;
- if (time_before(ack_at, call->ack_at)) {
- WRITE_ONCE(call->ack_at, ack_at);
- rxrpc_reduce_call_timer(call, ack_at, now,
- rxrpc_timer_set_for_ack);
- }
+ txb->ack_why = why;
+ txb->wire.seq = 0;
+ txb->wire.type = RXRPC_PACKET_TYPE_ACK;
+ txb->wire.flags |= RXRPC_SLOW_START_OK;
+ txb->ack.bufferSpace = 0;
+ txb->ack.maxSkew = 0;
+ txb->ack.firstPacket = 0;
+ txb->ack.previousPacket = 0;
+ txb->ack.serial = htonl(serial);
+ txb->ack.reason = ack_reason;
+ txb->ack.nAcks = 0;
+
+ if (!rxrpc_try_get_call(call, rxrpc_call_got)) {
+ rxrpc_put_txbuf(txb, rxrpc_txbuf_put_nomem);
+ return;
}
-trace:
- trace_rxrpc_propose_ack(call, why, ack_reason, serial, immediate,
- background, outcome);
-}
+ spin_lock_bh(&local->ack_tx_lock);
+ list_add_tail(&txb->tx_link, &local->ack_tx_queue);
+ spin_unlock_bh(&local->ack_tx_lock);
+ trace_rxrpc_send_ack(call, why, ack_reason, serial);
-/*
- * propose an ACK be sent, locking the call structure
- */
-void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
- u32 serial, bool immediate, bool background,
- enum rxrpc_propose_ack_trace why)
-{
- spin_lock_bh(&call->lock);
- __rxrpc_propose_ACK(call, ack_reason, serial,
- immediate, background, why);
- spin_unlock_bh(&call->lock);
+ if (in_task()) {
+ rxrpc_transmit_ack_packets(call->peer->local);
+ } else {
+ rxrpc_get_local(local);
+ rxrpc_queue_local(local);
+ }
}
/*
@@ -156,62 +132,131 @@ static void rxrpc_congestion_timeout(struct rxrpc_call *call)
*/
static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
{
- struct sk_buff *skb;
+ struct rxrpc_ackpacket *ack = NULL;
+ struct rxrpc_txbuf *txb;
+ struct sk_buff *ack_skb = NULL;
unsigned long resend_at;
- rxrpc_seq_t cursor, seq, top;
+ rxrpc_seq_t transmitted = READ_ONCE(call->tx_transmitted);
ktime_t now, max_age, oldest, ack_ts;
- int ix;
- u8 annotation, anno_type, retrans = 0, unacked = 0;
+ bool unacked = false;
+ unsigned int i;
+ LIST_HEAD(retrans_queue);
- _enter("{%d,%d}", call->tx_hard_ack, call->tx_top);
+ _enter("{%d,%d}", call->acks_hard_ack, call->tx_top);
now = ktime_get_real();
max_age = ktime_sub_us(now, jiffies_to_usecs(call->peer->rto_j));
+ oldest = now;
+
+ /* See if there's an ACK saved with a soft-ACK table in it. */
+ if (call->acks_soft_tbl) {
+ spin_lock_bh(&call->acks_ack_lock);
+ ack_skb = call->acks_soft_tbl;
+ if (ack_skb) {
+ rxrpc_get_skb(ack_skb, rxrpc_skb_ack);
+ ack = (void *)ack_skb->data + sizeof(struct rxrpc_wire_header);
+ }
+ spin_unlock_bh(&call->acks_ack_lock);
+ }
- spin_lock_bh(&call->lock);
+ if (list_empty(&call->tx_buffer))
+ goto no_resend;
- cursor = call->tx_hard_ack;
- top = call->tx_top;
- ASSERT(before_eq(cursor, top));
- if (cursor == top)
- goto out_unlock;
+ spin_lock(&call->tx_lock);
- /* Scan the packet list without dropping the lock and decide which of
- * the packets in the Tx buffer we're going to resend and what the new
- * resend timeout will be.
- */
- trace_rxrpc_resend(call, (cursor + 1) & RXRPC_RXTX_BUFF_MASK);
- oldest = now;
- for (seq = cursor + 1; before_eq(seq, top); seq++) {
- ix = seq & RXRPC_RXTX_BUFF_MASK;
- annotation = call->rxtx_annotations[ix];
- anno_type = annotation & RXRPC_TX_ANNO_MASK;
- annotation &= ~RXRPC_TX_ANNO_MASK;
- if (anno_type == RXRPC_TX_ANNO_ACK)
- continue;
+ if (list_empty(&call->tx_buffer))
+ goto no_further_resend;
+
+ trace_rxrpc_resend(call);
+ txb = list_first_entry(&call->tx_buffer, struct rxrpc_txbuf, call_link);
- skb = call->rxtx_buffer[ix];
- rxrpc_see_skb(skb, rxrpc_skb_seen);
+ /* Scan the soft ACK table without dropping the lock and resend any
+ * explicitly NAK'd packets.
+ */
+ if (ack) {
+ for (i = 0; i < ack->nAcks; i++) {
+ rxrpc_seq_t seq;
- if (anno_type == RXRPC_TX_ANNO_UNACK) {
- if (ktime_after(skb->tstamp, max_age)) {
- if (ktime_before(skb->tstamp, oldest))
- oldest = skb->tstamp;
+ if (ack->acks[i] & 1)
continue;
+ seq = ntohl(ack->firstPacket) + i;
+ if (after(txb->seq, transmitted))
+ break;
+ if (after(txb->seq, seq))
+ continue; /* A new hard ACK probably came in */
+ list_for_each_entry_from(txb, &call->tx_buffer, call_link) {
+ if (txb->seq == seq)
+ goto found_txb;
+ }
+ goto no_further_resend;
+
+ found_txb:
+ if (after(ntohl(txb->wire.serial), call->acks_highest_serial))
+ continue; /* Ack point not yet reached */
+
+ rxrpc_see_txbuf(txb, rxrpc_txbuf_see_unacked);
+
+ if (list_empty(&txb->tx_link)) {
+ rxrpc_get_txbuf(txb, rxrpc_txbuf_get_retrans);
+ rxrpc_get_call(call, rxrpc_call_got_tx);
+ list_add_tail(&txb->tx_link, &retrans_queue);
+ set_bit(RXRPC_TXBUF_RESENT, &txb->flags);
}
- if (!(annotation & RXRPC_TX_ANNO_RESENT))
- unacked++;
+
+ trace_rxrpc_retransmit(call, txb->seq,
+ ktime_to_ns(ktime_sub(txb->last_sent,
+ max_age)));
+
+ if (list_is_last(&txb->call_link, &call->tx_buffer))
+ goto no_further_resend;
+ txb = list_next_entry(txb, call_link);
+ }
+ }
+
+ /* Fast-forward through the Tx queue to the point the peer says it has
+ * seen. Anything between the soft-ACK table and that point will get
+ * ACK'd or NACK'd in due course, so don't worry about it here; here we
+ * need to consider retransmitting anything beyond that point.
+ *
+ * Note that ACK for a packet can beat the update of tx_transmitted.
+ */
+ if (after_eq(READ_ONCE(call->acks_prev_seq), READ_ONCE(call->tx_transmitted)))
+ goto no_further_resend;
+
+ list_for_each_entry_from(txb, &call->tx_buffer, call_link) {
+ if (before_eq(txb->seq, READ_ONCE(call->acks_prev_seq)))
+ continue;
+ if (after(txb->seq, READ_ONCE(call->tx_transmitted)))
+ break; /* Not transmitted yet */
+
+ if (ack && ack->reason == RXRPC_ACK_PING_RESPONSE &&
+ before(ntohl(txb->wire.serial), ntohl(ack->serial)))
+ goto do_resend; /* Wasn't accounted for by a more recent ping. */
+
+ if (ktime_after(txb->last_sent, max_age)) {
+ if (ktime_before(txb->last_sent, oldest))
+ oldest = txb->last_sent;
+ continue;
}
- /* Okay, we need to retransmit a packet. */
- call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS | annotation;
- retrans++;
- trace_rxrpc_retransmit(call, seq, annotation | anno_type,
- ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
+ do_resend:
+ unacked = true;
+ if (list_empty(&txb->tx_link)) {
+ rxrpc_get_txbuf(txb, rxrpc_txbuf_get_retrans);
+ list_add_tail(&txb->tx_link, &retrans_queue);
+ set_bit(RXRPC_TXBUF_RESENT, &txb->flags);
+ rxrpc_inc_stat(call->rxnet, stat_tx_data_retrans);
+ }
}
+no_further_resend:
+ spin_unlock(&call->tx_lock);
+no_resend:
+ rxrpc_free_skb(ack_skb, rxrpc_skb_freed);
+
resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest)));
- resend_at += jiffies + rxrpc_get_rto_backoff(call->peer, retrans);
+ resend_at += jiffies + rxrpc_get_rto_backoff(call->peer,
+ !list_empty(&retrans_queue));
WRITE_ONCE(call->resend_at, resend_at);
if (unacked)
@@ -221,62 +266,28 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
* that an ACK got lost somewhere. Send a ping to find out instead of
* retransmitting data.
*/
- if (!retrans) {
+ if (list_empty(&retrans_queue)) {
rxrpc_reduce_call_timer(call, resend_at, now_j,
rxrpc_timer_set_for_resend);
- spin_unlock_bh(&call->lock);
ack_ts = ktime_sub(now, call->acks_latest_ts);
if (ktime_to_us(ack_ts) < (call->peer->srtt_us >> 3))
goto out;
- rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
- rxrpc_propose_ack_ping_for_lost_ack);
- rxrpc_send_ack_packet(call, true, NULL);
+ rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
+ rxrpc_propose_ack_ping_for_lost_ack);
goto out;
}
- /* Now go through the Tx window and perform the retransmissions. We
- * have to drop the lock for each send. If an ACK comes in whilst the
- * lock is dropped, it may clear some of the retransmission markers for
- * packets that it soft-ACKs.
- */
- for (seq = cursor + 1; before_eq(seq, top); seq++) {
- ix = seq & RXRPC_RXTX_BUFF_MASK;
- annotation = call->rxtx_annotations[ix];
- anno_type = annotation & RXRPC_TX_ANNO_MASK;
- if (anno_type != RXRPC_TX_ANNO_RETRANS)
- continue;
-
- /* We need to reset the retransmission state, but we need to do
- * so before we drop the lock as a new ACK/NAK may come in and
- * confuse things
- */
- annotation &= ~RXRPC_TX_ANNO_MASK;
- annotation |= RXRPC_TX_ANNO_UNACK | RXRPC_TX_ANNO_RESENT;
- call->rxtx_annotations[ix] = annotation;
-
- skb = call->rxtx_buffer[ix];
- if (!skb)
- continue;
-
- rxrpc_get_skb(skb, rxrpc_skb_got);
- spin_unlock_bh(&call->lock);
-
- if (rxrpc_send_data_packet(call, skb, true) < 0) {
- rxrpc_free_skb(skb, rxrpc_skb_freed);
- return;
- }
+ while ((txb = list_first_entry_or_null(&retrans_queue,
+ struct rxrpc_txbuf, tx_link))) {
+ list_del_init(&txb->tx_link);
+ rxrpc_send_data_packet(call, txb);
+ rxrpc_put_txbuf(txb, rxrpc_txbuf_put_trans);
- if (rxrpc_is_client_call(call))
- rxrpc_expose_client_call(call);
-
- rxrpc_free_skb(skb, rxrpc_skb_freed);
- spin_lock_bh(&call->lock);
- if (after(call->tx_hard_ack, seq))
- seq = call->tx_hard_ack;
+ trace_rxrpc_retransmit(call, txb->seq,
+ ktime_to_ns(ktime_sub(txb->last_sent,
+ max_age)));
}
-out_unlock:
- spin_unlock_bh(&call->lock);
out:
_leave("");
}
@@ -288,9 +299,9 @@ void rxrpc_process_call(struct work_struct *work)
{
struct rxrpc_call *call =
container_of(work, struct rxrpc_call, processor);
- rxrpc_serial_t *send_ack;
unsigned long now, next, t;
unsigned int iterations = 0;
+ rxrpc_serial_t ackr_serial;
rxrpc_see_call(call);
@@ -309,6 +320,9 @@ recheck_state:
goto recheck_state;
}
+ if (READ_ONCE(call->acks_hard_ack) != call->tx_bottom)
+ rxrpc_shrink_call_tx_buffer(call);
+
if (call->state == RXRPC_CALL_COMPLETE) {
rxrpc_delete_call_timer(call);
goto out_put;
@@ -335,11 +349,13 @@ recheck_state:
set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
}
- t = READ_ONCE(call->ack_at);
+ t = READ_ONCE(call->delay_ack_at);
if (time_after_eq(now, t)) {
trace_rxrpc_timer(call, rxrpc_timer_exp_ack, now);
- cmpxchg(&call->ack_at, t, now + MAX_JIFFY_OFFSET);
- set_bit(RXRPC_CALL_EV_ACK, &call->events);
+ cmpxchg(&call->delay_ack_at, t, now + MAX_JIFFY_OFFSET);
+ ackr_serial = xchg(&call->ackr_serial, 0);
+ rxrpc_send_ACK(call, RXRPC_ACK_DELAY, ackr_serial,
+ rxrpc_propose_ack_ping_for_lost_ack);
}
t = READ_ONCE(call->ack_lost_at);
@@ -353,16 +369,16 @@ recheck_state:
if (time_after_eq(now, t)) {
trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now);
cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET);
- rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, true,
- rxrpc_propose_ack_ping_for_keepalive);
- set_bit(RXRPC_CALL_EV_PING, &call->events);
+ rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
+ rxrpc_propose_ack_ping_for_keepalive);
}
t = READ_ONCE(call->ping_at);
if (time_after_eq(now, t)) {
trace_rxrpc_timer(call, rxrpc_timer_exp_ping, now);
cmpxchg(&call->ping_at, t, now + MAX_JIFFY_OFFSET);
- set_bit(RXRPC_CALL_EV_PING, &call->events);
+ rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
+ rxrpc_propose_ack_ping_for_keepalive);
}
t = READ_ONCE(call->resend_at);
@@ -385,25 +401,10 @@ recheck_state:
goto recheck_state;
}
- send_ack = NULL;
if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events)) {
call->acks_lost_top = call->tx_top;
- rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
- rxrpc_propose_ack_ping_for_lost_ack);
- send_ack = &call->acks_lost_ping;
- }
-
- if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events) ||
- send_ack) {
- if (call->ackr_reason) {
- rxrpc_send_ack_packet(call, false, send_ack);
- goto recheck_state;
- }
- }
-
- if (test_and_clear_bit(RXRPC_CALL_EV_PING, &call->events)) {
- rxrpc_send_ack_packet(call, true, NULL);
- goto recheck_state;
+ rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
+ rxrpc_propose_ack_ping_for_lost_ack);
}
if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events) &&
@@ -419,7 +420,7 @@ recheck_state:
set(call->expect_req_by);
set(call->expect_term_by);
- set(call->ack_at);
+ set(call->delay_ack_at);
set(call->ack_lost_at);
set(call->resend_at);
set(call->keepalive_at);
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 6401cdf7a624..1befe22cd301 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -52,7 +52,7 @@ static void rxrpc_call_timer_expired(struct timer_list *t)
_enter("%d", call->debug_id);
if (call->state < RXRPC_CALL_COMPLETE) {
- trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies);
+ trace_rxrpc_timer_expired(call, jiffies);
__rxrpc_queue_call(call);
} else {
rxrpc_put_call(call, rxrpc_call_put);
@@ -129,16 +129,6 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
if (!call)
return NULL;
- call->rxtx_buffer = kcalloc(RXRPC_RXTX_BUFF_SIZE,
- sizeof(struct sk_buff *),
- gfp);
- if (!call->rxtx_buffer)
- goto nomem;
-
- call->rxtx_annotations = kcalloc(RXRPC_RXTX_BUFF_SIZE, sizeof(u8), gfp);
- if (!call->rxtx_annotations)
- goto nomem_2;
-
mutex_init(&call->user_mutex);
/* Prevent lockdep reporting a deadlock false positive between the afs
@@ -155,37 +145,39 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
INIT_LIST_HEAD(&call->accept_link);
INIT_LIST_HEAD(&call->recvmsg_link);
INIT_LIST_HEAD(&call->sock_link);
+ INIT_LIST_HEAD(&call->tx_buffer);
+ skb_queue_head_init(&call->recvmsg_queue);
+ skb_queue_head_init(&call->rx_oos_queue);
init_waitqueue_head(&call->waitq);
- spin_lock_init(&call->lock);
spin_lock_init(&call->notify_lock);
+ spin_lock_init(&call->tx_lock);
spin_lock_init(&call->input_lock);
+ spin_lock_init(&call->acks_ack_lock);
rwlock_init(&call->state_lock);
refcount_set(&call->ref, 1);
call->debug_id = debug_id;
call->tx_total_len = -1;
call->next_rx_timo = 20 * HZ;
call->next_req_timo = 1 * HZ;
+ atomic64_set(&call->ackr_window, 0x100000001ULL);
memset(&call->sock_node, 0xed, sizeof(call->sock_node));
- /* Leave space in the ring to handle a maxed-out jumbo packet */
call->rx_winsize = rxrpc_rx_window_size;
call->tx_winsize = 16;
- call->rx_expect_next = 1;
- call->cong_cwnd = 2;
- call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1;
+ if (RXRPC_TX_SMSS > 2190)
+ call->cong_cwnd = 2;
+ else if (RXRPC_TX_SMSS > 1095)
+ call->cong_cwnd = 3;
+ else
+ call->cong_cwnd = 4;
+ call->cong_ssthresh = RXRPC_TX_MAX_WINDOW;
call->rxnet = rxnet;
call->rtt_avail = RXRPC_CALL_RTT_AVAIL_MASK;
atomic_inc(&rxnet->nr_calls);
return call;
-
-nomem_2:
- kfree(call->rxtx_buffer);
-nomem:
- kmem_cache_free(rxrpc_call_jar, call);
- return NULL;
}
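For reference, a hedged sketch of the rule the cwnd initialisation above follows, matching the RFC 5681 initial-window formula (2 segments when the sender MSS exceeds 2190 bytes, 3 when it exceeds 1095, otherwise 4); the helper name is illustrative.

/* RFC 5681 initial congestion window, in segments, for a given SMSS. */
static unsigned int example_initial_cwnd(unsigned int smss)
{
	if (smss > 2190)
		return 2;
	if (smss > 1095)
		return 3;
	return 4;
}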
/*
@@ -206,7 +198,6 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
return ERR_PTR(-ENOMEM);
call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
call->service_id = srx->srx_service;
- call->tx_phase = true;
now = ktime_get_real();
call->acks_latest_ts = now;
call->cong_tstamp = now;
@@ -223,7 +214,7 @@ static void rxrpc_start_call_timer(struct rxrpc_call *call)
unsigned long now = jiffies;
unsigned long j = now + MAX_JIFFY_OFFSET;
- call->ack_at = j;
+ call->delay_ack_at = j;
call->ack_lost_at = j;
call->resend_at = j;
call->ping_at = j;
@@ -510,16 +501,12 @@ void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
}
/*
- * Clean up the RxTx skb ring.
+ * Clean up the Rx skb ring.
*/
static void rxrpc_cleanup_ring(struct rxrpc_call *call)
{
- int i;
-
- for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
- rxrpc_free_skb(call->rxtx_buffer[i], rxrpc_skb_cleaned);
- call->rxtx_buffer[i] = NULL;
- }
+ skb_queue_purge(&call->recvmsg_queue);
+ skb_queue_purge(&call->rx_oos_queue);
}
/*
@@ -539,10 +526,8 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
- spin_lock_bh(&call->lock);
if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
BUG();
- spin_unlock_bh(&call->lock);
rxrpc_put_call_slot(call);
rxrpc_delete_call_timer(call);
@@ -656,8 +641,6 @@ static void rxrpc_destroy_call(struct work_struct *work)
rxrpc_put_connection(call->conn);
rxrpc_put_peer(call->peer);
- kfree(call->rxtx_buffer);
- kfree(call->rxtx_annotations);
kmem_cache_free(rxrpc_call_jar, call);
if (atomic_dec_and_test(&rxnet->nr_calls))
wake_up_var(&rxnet->nr_calls);
@@ -684,6 +667,8 @@ static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
*/
void rxrpc_cleanup_call(struct rxrpc_call *call)
{
+ struct rxrpc_txbuf *txb;
+
_net("DESTROY CALL %d", call->debug_id);
memset(&call->sock_node, 0xcd, sizeof(call->sock_node));
@@ -692,7 +677,13 @@ void rxrpc_cleanup_call(struct rxrpc_call *call)
ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
rxrpc_cleanup_ring(call);
- rxrpc_free_skb(call->tx_pending, rxrpc_skb_cleaned);
+ while ((txb = list_first_entry_or_null(&call->tx_buffer,
+ struct rxrpc_txbuf, call_link))) {
+ list_del(&txb->call_link);
+ rxrpc_put_txbuf(txb, rxrpc_txbuf_put_cleaned);
+ }
+ rxrpc_put_txbuf(call->tx_pending, rxrpc_txbuf_put_cleaned);
+ rxrpc_free_skb(call->acks_soft_tbl, rxrpc_skb_cleaned);
call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 3c9eeb5b750c..f020f308ed9e 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -363,7 +363,8 @@ static struct rxrpc_bundle *rxrpc_prep_call(struct rxrpc_sock *rx,
if (!cp->peer)
goto error;
- call->cong_cwnd = cp->peer->cong_cwnd;
+ call->tx_last_sent = ktime_get_real();
+ call->cong_ssthresh = cp->peer->cong_ssthresh;
if (call->cong_cwnd >= call->cong_ssthresh)
call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
else
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index 22089e37e97f..156bd26daf74 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -175,7 +175,7 @@ void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
trace_rxrpc_disconnect_call(call);
switch (call->completion) {
case RXRPC_CALL_SUCCEEDED:
- chan->last_seq = call->rx_hard_ack;
+ chan->last_seq = call->rx_highest_seq;
chan->last_type = RXRPC_PACKET_TYPE_ACK;
break;
case RXRPC_CALL_LOCALLY_ABORTED:
@@ -207,7 +207,7 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
{
struct rxrpc_connection *conn = call->conn;
- call->peer->cong_cwnd = call->cong_cwnd;
+ call->peer->cong_ssthresh = call->cong_ssthresh;
if (!hlist_unhashed(&call->error_link)) {
spin_lock_bh(&call->peer->lock);
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 721d847ba92b..bdf70b81addc 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -7,20 +7,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
-#include <linux/net.h>
-#include <linux/skbuff.h>
-#include <linux/errqueue.h>
-#include <linux/udp.h>
-#include <linux/in.h>
-#include <linux/in6.h>
-#include <linux/icmp.h>
-#include <linux/gfp.h>
-#include <net/sock.h>
-#include <net/af_rxrpc.h>
-#include <net/ip.h>
-#include <net/udp.h>
-#include <net/net_namespace.h>
#include "ar-internal.h"
static void rxrpc_proto_abort(const char *why,
@@ -46,7 +32,7 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
bool resend = false;
summary->flight_size =
- (call->tx_top - call->tx_hard_ack) - summary->nr_acks;
+ (call->tx_top - call->acks_hard_ack) - summary->nr_acks;
if (test_and_clear_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags)) {
summary->retrans_timeo = true;
@@ -72,9 +58,28 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
summary->cumulative_acks = cumulative_acks;
summary->dup_acks = call->cong_dup_acks;
+ /* If we haven't transmitted anything for >1RTT, we should reset the
+ * congestion management state.
+ */
+ if ((call->cong_mode == RXRPC_CALL_SLOW_START ||
+ call->cong_mode == RXRPC_CALL_CONGEST_AVOIDANCE) &&
+ ktime_before(ktime_add_us(call->tx_last_sent,
+ call->peer->srtt_us >> 3),
+ ktime_get_real())
+ ) {
+ change = rxrpc_cong_idle_reset;
+ summary->mode = RXRPC_CALL_SLOW_START;
+ if (RXRPC_TX_SMSS > 2190)
+ summary->cwnd = 2;
+ else if (RXRPC_TX_SMSS > 1095)
+ summary->cwnd = 3;
+ else
+ summary->cwnd = 4;
+ }
+
switch (call->cong_mode) {
case RXRPC_CALL_SLOW_START:
- if (summary->nr_nacks > 0)
+ if (summary->saw_nacks)
goto packet_loss_detected;
if (summary->cumulative_acks > 0)
cwnd += 1;
@@ -85,7 +90,7 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
goto out;
case RXRPC_CALL_CONGEST_AVOIDANCE:
- if (summary->nr_nacks > 0)
+ if (summary->saw_nacks)
goto packet_loss_detected;
/* We analyse the number of packets that get ACK'd per RTT
@@ -104,7 +109,7 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
goto out;
case RXRPC_CALL_PACKET_LOSS:
- if (summary->nr_nacks == 0)
+ if (!summary->saw_nacks)
goto resume_normality;
if (summary->new_low_nack) {
@@ -142,7 +147,7 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
} else {
change = rxrpc_cong_progress;
cwnd = call->cong_ssthresh;
- if (summary->nr_nacks == 0)
+ if (!summary->saw_nacks)
goto resume_normality;
}
goto out;
@@ -164,8 +169,8 @@ resume_normality:
out:
cumulative_acks = 0;
out_no_clear_ca:
- if (cwnd >= RXRPC_RXTX_BUFF_SIZE - 1)
- cwnd = RXRPC_RXTX_BUFF_SIZE - 1;
+ if (cwnd >= RXRPC_TX_MAX_WINDOW)
+ cwnd = RXRPC_TX_MAX_WINDOW;
call->cong_cwnd = cwnd;
call->cong_cumul_acks = cumulative_acks;
trace_rxrpc_congest(call, summary, acked_serial, change);
@@ -183,9 +188,8 @@ send_extra_data:
/* Send some previously unsent DATA if we have some to advance the ACK
* state.
*/
- if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
- RXRPC_TX_ANNO_LAST ||
- summary->nr_acks != call->tx_top - call->tx_hard_ack) {
+ if (test_bit(RXRPC_CALL_TX_LAST, &call->flags) ||
+ summary->nr_acks != call->tx_top - call->acks_hard_ack) {
call->cong_extra++;
wake_up(&call->waitq);
}
@@ -198,53 +202,39 @@ send_extra_data:
static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
struct rxrpc_ack_summary *summary)
{
- struct sk_buff *skb, *list = NULL;
+ struct rxrpc_txbuf *txb;
bool rot_last = false;
- int ix;
- u8 annotation;
-
- if (call->acks_lowest_nak == call->tx_hard_ack) {
- call->acks_lowest_nak = to;
- } else if (before_eq(call->acks_lowest_nak, to)) {
- summary->new_low_nack = true;
- call->acks_lowest_nak = to;
- }
-
- spin_lock(&call->lock);
-
- while (before(call->tx_hard_ack, to)) {
- call->tx_hard_ack++;
- ix = call->tx_hard_ack & RXRPC_RXTX_BUFF_MASK;
- skb = call->rxtx_buffer[ix];
- annotation = call->rxtx_annotations[ix];
- rxrpc_see_skb(skb, rxrpc_skb_rotated);
- call->rxtx_buffer[ix] = NULL;
- call->rxtx_annotations[ix] = 0;
- skb->next = list;
- list = skb;
- if (annotation & RXRPC_TX_ANNO_LAST) {
+ list_for_each_entry_rcu(txb, &call->tx_buffer, call_link, false) {
+ if (before_eq(txb->seq, call->acks_hard_ack))
+ continue;
+ summary->nr_rot_new_acks++;
+ if (test_bit(RXRPC_TXBUF_LAST, &txb->flags)) {
set_bit(RXRPC_CALL_TX_LAST, &call->flags);
rot_last = true;
}
- if ((annotation & RXRPC_TX_ANNO_MASK) != RXRPC_TX_ANNO_ACK)
- summary->nr_rot_new_acks++;
+ if (txb->seq == to)
+ break;
}
- spin_unlock(&call->lock);
+ if (rot_last)
+ set_bit(RXRPC_CALL_TX_ALL_ACKED, &call->flags);
- trace_rxrpc_transmit(call, (rot_last ?
- rxrpc_transmit_rotate_last :
- rxrpc_transmit_rotate));
- wake_up(&call->waitq);
+ _enter("%x,%x,%x,%d", to, call->acks_hard_ack, call->tx_top, rot_last);
- while (list) {
- skb = list;
- list = skb->next;
- skb_mark_not_on_list(skb);
- rxrpc_free_skb(skb, rxrpc_skb_freed);
+ if (call->acks_lowest_nak == call->acks_hard_ack) {
+ call->acks_lowest_nak = to;
+ } else if (after(to, call->acks_lowest_nak)) {
+ summary->new_low_nack = true;
+ call->acks_lowest_nak = to;
}
+ smp_store_release(&call->acks_hard_ack, to);
+
+ trace_rxrpc_txqueue(call, (rot_last ?
+ rxrpc_txqueue_rotate_last :
+ rxrpc_txqueue_rotate));
+ wake_up(&call->waitq);
return rot_last;
}
@@ -284,9 +274,9 @@ static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
write_unlock(&call->state_lock);
if (state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
- trace_rxrpc_transmit(call, rxrpc_transmit_await_reply);
+ trace_rxrpc_txqueue(call, rxrpc_txqueue_await_reply);
else
- trace_rxrpc_transmit(call, rxrpc_transmit_end);
+ trace_rxrpc_txqueue(call, rxrpc_txqueue_end);
_leave(" = ok");
return true;
@@ -307,13 +297,10 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
rxrpc_seq_t top = READ_ONCE(call->tx_top);
if (call->ackr_reason) {
- spin_lock_bh(&call->lock);
- call->ackr_reason = 0;
- spin_unlock_bh(&call->lock);
now = jiffies;
timo = now + MAX_JIFFY_OFFSET;
WRITE_ONCE(call->resend_at, timo);
- WRITE_ONCE(call->ack_at, timo);
+ WRITE_ONCE(call->delay_ack_at, timo);
trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
}
@@ -323,85 +310,230 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
return false;
}
}
- if (!rxrpc_end_tx_phase(call, true, "ETD"))
- return false;
- call->tx_phase = false;
- return true;
+ return rxrpc_end_tx_phase(call, true, "ETD");
+}
+
+static void rxrpc_input_update_ack_window(struct rxrpc_call *call,
+ rxrpc_seq_t window, rxrpc_seq_t wtop)
+{
+ atomic64_set_release(&call->ackr_window, ((u64)wtop) << 32 | window);
}
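
For reference, a small sketch of the packing used for ackr_window above: the bottom of the Rx window (the next sequence expected in order) sits in the low 32 bits and wtop (roughly one past the highest sequence seen) in the high 32 bits, so both can be published or read with a single 64-bit atomic. The helper names are illustrative, not part of the patch:

static inline u64 ackr_window_pack(u32 window, u32 wtop)
{
        return ((u64)wtop << 32) | window;
}

static inline void ackr_window_unpack(u64 wtmp, u32 *window, u32 *wtop)
{
        *window = lower_32_bits(wtmp);  /* bottom of the Rx window */
        *wtop   = upper_32_bits(wtmp);  /* top of the Rx window */
}
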
/*
- * Scan a data packet to validate its structure and to work out how many
- * subpackets it contains.
- *
- * A jumbo packet is a collection of consecutive packets glued together with
- * little headers between that indicate how to change the initial header for
- * each subpacket.
- *
- * RXRPC_JUMBO_PACKET must be set on all but the last subpacket - and all but
- * the last are RXRPC_JUMBO_DATALEN in size. The last subpacket may be of any
- * size.
+ * Push a DATA packet onto the Rx queue.
*/
-static bool rxrpc_validate_data(struct sk_buff *skb)
+static void rxrpc_input_queue_data(struct rxrpc_call *call, struct sk_buff *skb,
+ rxrpc_seq_t window, rxrpc_seq_t wtop,
+ enum rxrpc_receive_trace why)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- unsigned int offset = sizeof(struct rxrpc_wire_header);
- unsigned int len = skb->len;
- u8 flags = sp->hdr.flags;
+ bool last = sp->hdr.flags & RXRPC_LAST_PACKET;
- for (;;) {
- if (flags & RXRPC_REQUEST_ACK)
- __set_bit(sp->nr_subpackets, sp->rx_req_ack);
- sp->nr_subpackets++;
+ __skb_queue_tail(&call->recvmsg_queue, skb);
+ rxrpc_input_update_ack_window(call, window, wtop);
- if (!(flags & RXRPC_JUMBO_PACKET))
- break;
+ trace_rxrpc_receive(call, last ? why + 1 : why, sp->hdr.serial, sp->hdr.seq);
+}
- if (len - offset < RXRPC_JUMBO_SUBPKTLEN)
- goto protocol_error;
- if (flags & RXRPC_LAST_PACKET)
- goto protocol_error;
- offset += RXRPC_JUMBO_DATALEN;
- if (skb_copy_bits(skb, offset, &flags, 1) < 0)
- goto protocol_error;
- offset += sizeof(struct rxrpc_jumbo_header);
+/*
+ * Process a DATA packet.
+ */
+static void rxrpc_input_data_one(struct rxrpc_call *call, struct sk_buff *skb)
+{
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ struct sk_buff *oos;
+ rxrpc_serial_t serial = sp->hdr.serial;
+ u64 win = atomic64_read(&call->ackr_window);
+ rxrpc_seq_t window = lower_32_bits(win);
+ rxrpc_seq_t wtop = upper_32_bits(win);
+ rxrpc_seq_t wlimit = window + call->rx_winsize - 1;
+ rxrpc_seq_t seq = sp->hdr.seq;
+ bool last = sp->hdr.flags & RXRPC_LAST_PACKET;
+ int ack_reason = -1;
+
+ rxrpc_inc_stat(call->rxnet, stat_rx_data);
+ if (sp->hdr.flags & RXRPC_REQUEST_ACK)
+ rxrpc_inc_stat(call->rxnet, stat_rx_data_reqack);
+ if (sp->hdr.flags & RXRPC_JUMBO_PACKET)
+ rxrpc_inc_stat(call->rxnet, stat_rx_data_jumbo);
+
+ if (last) {
+ if (test_and_set_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
+ seq + 1 != wtop) {
+ rxrpc_proto_abort("LSN", call, seq);
+ goto err_free;
+ }
+ } else {
+ if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
+ after_eq(seq, wtop)) {
+ pr_warn("Packet beyond last: c=%x q=%x window=%x-%x wlimit=%x\n",
+ call->debug_id, seq, window, wtop, wlimit);
+ rxrpc_proto_abort("LSA", call, seq);
+ goto err_free;
+ }
}
- if (flags & RXRPC_LAST_PACKET)
- sp->rx_flags |= RXRPC_SKB_INCL_LAST;
- return true;
+ if (after(seq, call->rx_highest_seq))
+ call->rx_highest_seq = seq;
-protocol_error:
- return false;
+ trace_rxrpc_rx_data(call->debug_id, seq, serial, sp->hdr.flags);
+
+ if (before(seq, window)) {
+ ack_reason = RXRPC_ACK_DUPLICATE;
+ goto send_ack;
+ }
+ if (after(seq, wlimit)) {
+ ack_reason = RXRPC_ACK_EXCEEDS_WINDOW;
+ goto send_ack;
+ }
+
+ /* Queue the packet. */
+ if (seq == window) {
+ rxrpc_seq_t reset_from;
+ bool reset_sack = false;
+
+ if (sp->hdr.flags & RXRPC_REQUEST_ACK)
+ ack_reason = RXRPC_ACK_REQUESTED;
+ /* Send an immediate ACK if we fill in a hole */
+ else if (!skb_queue_empty(&call->rx_oos_queue))
+ ack_reason = RXRPC_ACK_DELAY;
+
+ window++;
+ if (after(window, wtop))
+ wtop = window;
+
+ spin_lock(&call->recvmsg_queue.lock);
+ rxrpc_input_queue_data(call, skb, window, wtop, rxrpc_receive_queue);
+ skb = NULL;
+
+ while ((oos = skb_peek(&call->rx_oos_queue))) {
+ struct rxrpc_skb_priv *osp = rxrpc_skb(oos);
+
+ if (after(osp->hdr.seq, window))
+ break;
+
+ __skb_unlink(oos, &call->rx_oos_queue);
+ last = osp->hdr.flags & RXRPC_LAST_PACKET;
+ seq = osp->hdr.seq;
+ if (!reset_sack) {
+ reset_from = seq;
+ reset_sack = true;
+ }
+
+ window++;
+ rxrpc_input_queue_data(call, oos, window, wtop,
+ rxrpc_receive_queue_oos);
+ }
+
+ spin_unlock(&call->recvmsg_queue.lock);
+
+ if (reset_sack) {
+ do {
+ call->ackr_sack_table[reset_from % RXRPC_SACK_SIZE] = 0;
+ } while (reset_from++, before(reset_from, window));
+ }
+ } else {
+ bool keep = false;
+
+ ack_reason = RXRPC_ACK_OUT_OF_SEQUENCE;
+
+ if (!call->ackr_sack_table[seq % RXRPC_SACK_SIZE]) {
+ call->ackr_sack_table[seq % RXRPC_SACK_SIZE] = 1;
+ keep = 1;
+ }
+
+ if (after(seq + 1, wtop)) {
+ wtop = seq + 1;
+ rxrpc_input_update_ack_window(call, window, wtop);
+ }
+
+ if (!keep) {
+ ack_reason = RXRPC_ACK_DUPLICATE;
+ goto send_ack;
+ }
+
+ skb_queue_walk(&call->rx_oos_queue, oos) {
+ struct rxrpc_skb_priv *osp = rxrpc_skb(oos);
+
+ if (after(osp->hdr.seq, seq)) {
+ __skb_queue_before(&call->rx_oos_queue, oos, skb);
+ goto oos_queued;
+ }
+ }
+
+ __skb_queue_tail(&call->rx_oos_queue, skb);
+ oos_queued:
+ trace_rxrpc_receive(call, last ? rxrpc_receive_oos_last : rxrpc_receive_oos,
+ sp->hdr.serial, sp->hdr.seq);
+ skb = NULL;
+ }
+
+send_ack:
+ if (ack_reason < 0 &&
+ atomic_inc_return(&call->ackr_nr_unacked) > 2 &&
+ test_and_set_bit(RXRPC_CALL_IDLE_ACK_PENDING, &call->flags)) {
+ ack_reason = RXRPC_ACK_IDLE;
+ } else if (ack_reason >= 0) {
+ set_bit(RXRPC_CALL_IDLE_ACK_PENDING, &call->flags);
+ }
+
+ if (ack_reason >= 0)
+ rxrpc_send_ACK(call, ack_reason, serial,
+ rxrpc_propose_ack_input_data);
+ else
+ rxrpc_propose_delay_ACK(call, serial,
+ rxrpc_propose_ack_input_data);
+
+err_free:
+ rxrpc_free_skb(skb, rxrpc_skb_freed);
}
/*
- * Handle reception of a duplicate packet.
- *
- * We have to take care to avoid an attack here whereby we're given a series of
- * jumbograms, each with a sequence number one before the preceding one and
- * filled up to maximum UDP size. If they never send us the first packet in
- * the sequence, they can cause us to have to hold on to around 2MiB of kernel
- * space until the call times out.
- *
- * We limit the space usage by only accepting three duplicate jumbo packets per
- * call. After that, we tell the other side we're no longer accepting jumbos
- * (that information is encoded in the ACK packet).
+ * Split a jumbo packet and file the bits separately.
*/
-static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq,
- bool is_jumbo, bool *_jumbo_bad)
+static bool rxrpc_input_split_jumbo(struct rxrpc_call *call, struct sk_buff *skb)
{
- /* Discard normal packets that are duplicates. */
- if (is_jumbo)
- return;
+ struct rxrpc_jumbo_header jhdr;
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb), *jsp;
+ struct sk_buff *jskb;
+ unsigned int offset = sizeof(struct rxrpc_wire_header);
+ unsigned int len = skb->len - offset;
- /* Skip jumbo subpackets that are duplicates. When we've had three or
- * more partially duplicate jumbo packets, we refuse to take any more
- * jumbos for this call.
- */
- if (!*_jumbo_bad) {
- call->nr_jumbo_bad++;
- *_jumbo_bad = true;
+ while (sp->hdr.flags & RXRPC_JUMBO_PACKET) {
+ if (len < RXRPC_JUMBO_SUBPKTLEN)
+ goto protocol_error;
+ if (sp->hdr.flags & RXRPC_LAST_PACKET)
+ goto protocol_error;
+ if (skb_copy_bits(skb, offset + RXRPC_JUMBO_DATALEN,
+ &jhdr, sizeof(jhdr)) < 0)
+ goto protocol_error;
+
+ jskb = skb_clone(skb, GFP_ATOMIC);
+ if (!jskb) {
+ kdebug("couldn't clone");
+ return false;
+ }
+ rxrpc_new_skb(jskb, rxrpc_skb_cloned_jumbo);
+ jsp = rxrpc_skb(jskb);
+ jsp->offset = offset;
+ jsp->len = RXRPC_JUMBO_DATALEN;
+ rxrpc_input_data_one(call, jskb);
+
+ sp->hdr.flags = jhdr.flags;
+ sp->hdr._rsvd = ntohs(jhdr._rsvd);
+ sp->hdr.seq++;
+ sp->hdr.serial++;
+ offset += RXRPC_JUMBO_SUBPKTLEN;
+ len -= RXRPC_JUMBO_SUBPKTLEN;
}
+
+ sp->offset = offset;
+ sp->len = len;
+ rxrpc_input_data_one(call, skb);
+ return true;
+
+protocol_error:
+ return false;
}
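
The splitter above walks the standard jumbo layout: every non-final subpacket carries exactly RXRPC_JUMBO_DATALEN bytes of payload followed by a small jumbo header holding the flags and reserved word for the next subpacket, and the final subpacket takes whatever remains. A rough sketch, under that assumption, of how many subpackets a given payload length implies (the helper name is illustrative):

static unsigned int rxrpc_jumbo_subpkt_count(unsigned int payload_len)
{
        unsigned int n = 1;

        /* Each non-final subpacket consumes a full data slot plus its
         * trailing jumbo header; the last one takes the remainder.
         */
        while (payload_len > RXRPC_JUMBO_SUBPKTLEN) {
                payload_len -= RXRPC_JUMBO_SUBPKTLEN;
                n++;
        }
        return n;
}
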
/*
@@ -412,17 +544,15 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
enum rxrpc_call_state state;
- unsigned int j, nr_subpackets, nr_unacked = 0;
- rxrpc_serial_t serial = sp->hdr.serial, ack_serial = serial;
- rxrpc_seq_t seq0 = sp->hdr.seq, hard_ack;
- bool immediate_ack = false, jumbo_bad = false;
- u8 ack = 0;
+ rxrpc_serial_t serial = sp->hdr.serial;
+ rxrpc_seq_t seq0 = sp->hdr.seq;
- _enter("{%u,%u},{%u,%u}",
- call->rx_hard_ack, call->rx_top, skb->len, seq0);
+ _enter("{%llx,%x},{%u,%x}",
+ atomic64_read(&call->ackr_window), call->rx_highest_seq,
+ skb->len, seq0);
- _proto("Rx DATA %%%u { #%u f=%02x n=%u }",
- sp->hdr.serial, seq0, sp->hdr.flags, sp->nr_subpackets);
+ _proto("Rx DATA %%%u { #%u f=%02x }",
+ sp->hdr.serial, seq0, sp->hdr.flags);
state = READ_ONCE(call->state);
if (state >= RXRPC_CALL_COMPLETE) {
@@ -430,6 +560,24 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
return;
}
+ /* Unshare the packet so that it can be modified for in-place
+ * decryption.
+ */
+ if (sp->hdr.securityIndex != 0) {
+ struct sk_buff *nskb = skb_unshare(skb, GFP_ATOMIC);
+ if (!nskb) {
+ rxrpc_eaten_skb(skb, rxrpc_skb_unshared_nomem);
+ return;
+ }
+
+ if (nskb != skb) {
+ rxrpc_eaten_skb(skb, rxrpc_skb_received);
+ skb = nskb;
+ rxrpc_new_skb(skb, rxrpc_skb_unshared);
+ sp = rxrpc_skb(skb);
+ }
+ }
+
if (state == RXRPC_CALL_SERVER_RECV_REQUEST) {
unsigned long timo = READ_ONCE(call->next_req_timo);
unsigned long now, expect_req_by;
@@ -451,166 +599,18 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
!rxrpc_receiving_reply(call))
- goto unlock;
-
- hard_ack = READ_ONCE(call->rx_hard_ack);
-
- nr_subpackets = sp->nr_subpackets;
- if (nr_subpackets > 1) {
- if (call->nr_jumbo_bad > 3) {
- ack = RXRPC_ACK_NOSPACE;
- ack_serial = serial;
- goto ack;
- }
- }
-
- for (j = 0; j < nr_subpackets; j++) {
- rxrpc_serial_t serial = sp->hdr.serial + j;
- rxrpc_seq_t seq = seq0 + j;
- unsigned int ix = seq & RXRPC_RXTX_BUFF_MASK;
- bool terminal = (j == nr_subpackets - 1);
- bool last = terminal && (sp->rx_flags & RXRPC_SKB_INCL_LAST);
- u8 flags, annotation = j;
-
- _proto("Rx DATA+%u %%%u { #%x t=%u l=%u }",
- j, serial, seq, terminal, last);
-
- if (last) {
- if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
- seq != call->rx_top) {
- rxrpc_proto_abort("LSN", call, seq);
- goto unlock;
- }
- } else {
- if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
- after_eq(seq, call->rx_top)) {
- rxrpc_proto_abort("LSA", call, seq);
- goto unlock;
- }
- }
-
- flags = 0;
- if (last)
- flags |= RXRPC_LAST_PACKET;
- if (!terminal)
- flags |= RXRPC_JUMBO_PACKET;
- if (test_bit(j, sp->rx_req_ack))
- flags |= RXRPC_REQUEST_ACK;
- trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation);
-
- if (before_eq(seq, hard_ack)) {
- ack = RXRPC_ACK_DUPLICATE;
- ack_serial = serial;
- continue;
- }
-
- if (call->rxtx_buffer[ix]) {
- rxrpc_input_dup_data(call, seq, nr_subpackets > 1,
- &jumbo_bad);
- if (ack != RXRPC_ACK_DUPLICATE) {
- ack = RXRPC_ACK_DUPLICATE;
- ack_serial = serial;
- }
- immediate_ack = true;
- continue;
- }
-
- if (after(seq, hard_ack + call->rx_winsize)) {
- ack = RXRPC_ACK_EXCEEDS_WINDOW;
- ack_serial = serial;
- if (flags & RXRPC_JUMBO_PACKET) {
- if (!jumbo_bad) {
- call->nr_jumbo_bad++;
- jumbo_bad = true;
- }
- }
-
- goto ack;
- }
-
- if (flags & RXRPC_REQUEST_ACK && !ack) {
- ack = RXRPC_ACK_REQUESTED;
- ack_serial = serial;
- }
-
- if (after(seq0, call->ackr_highest_seq))
- call->ackr_highest_seq = seq0;
-
- /* Queue the packet. We use a couple of memory barriers here as need
- * to make sure that rx_top is perceived to be set after the buffer
- * pointer and that the buffer pointer is set after the annotation and
- * the skb data.
- *
- * Barriers against rxrpc_recvmsg_data() and rxrpc_rotate_rx_window()
- * and also rxrpc_fill_out_ack().
- */
- if (!terminal)
- rxrpc_get_skb(skb, rxrpc_skb_got);
- call->rxtx_annotations[ix] = annotation;
- smp_wmb();
- call->rxtx_buffer[ix] = skb;
- if (after(seq, call->rx_top)) {
- smp_store_release(&call->rx_top, seq);
- } else if (before(seq, call->rx_top)) {
- /* Send an immediate ACK if we fill in a hole */
- if (!ack) {
- ack = RXRPC_ACK_DELAY;
- ack_serial = serial;
- }
- immediate_ack = true;
- }
-
- if (terminal) {
- /* From this point on, we're not allowed to touch the
- * packet any longer as its ref now belongs to the Rx
- * ring.
- */
- skb = NULL;
- sp = NULL;
- }
-
- nr_unacked++;
-
- if (last) {
- set_bit(RXRPC_CALL_RX_LAST, &call->flags);
- if (!ack) {
- ack = RXRPC_ACK_DELAY;
- ack_serial = serial;
- }
- trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq);
- } else {
- trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq);
- }
+ goto out;
- if (after_eq(seq, call->rx_expect_next)) {
- if (after(seq, call->rx_expect_next)) {
- _net("OOS %u > %u", seq, call->rx_expect_next);
- ack = RXRPC_ACK_OUT_OF_SEQUENCE;
- ack_serial = serial;
- }
- call->rx_expect_next = seq + 1;
- }
- if (!ack)
- ack_serial = serial;
+ if (!rxrpc_input_split_jumbo(call, skb)) {
+ rxrpc_proto_abort("VLD", call, sp->hdr.seq);
+ goto out;
}
+ skb = NULL;
-ack:
- if (atomic_add_return(nr_unacked, &call->ackr_nr_unacked) > 2 && !ack)
- ack = RXRPC_ACK_IDLE;
-
- if (ack)
- rxrpc_propose_ACK(call, ack, ack_serial,
- immediate_ack, true,
- rxrpc_propose_ack_input_data);
- else
- rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial,
- false, true,
- rxrpc_propose_ack_input_data);
-
+out:
trace_rxrpc_notify_socket(call->debug_id, serial);
rxrpc_notify_socket(call);
-unlock:
spin_unlock(&call->input_lock);
rxrpc_free_skb(skb, rxrpc_skb_freed);
_leave(" [queued]");
@@ -679,31 +679,8 @@ static void rxrpc_complete_rtt_probe(struct rxrpc_call *call,
*/
static void rxrpc_input_check_for_lost_ack(struct rxrpc_call *call)
{
- rxrpc_seq_t top, bottom, seq;
- bool resend = false;
-
- spin_lock_bh(&call->lock);
-
- bottom = call->tx_hard_ack + 1;
- top = call->acks_lost_top;
- if (before(bottom, top)) {
- for (seq = bottom; before_eq(seq, top); seq++) {
- int ix = seq & RXRPC_RXTX_BUFF_MASK;
- u8 annotation = call->rxtx_annotations[ix];
- u8 anno_type = annotation & RXRPC_TX_ANNO_MASK;
-
- if (anno_type != RXRPC_TX_ANNO_UNACK)
- continue;
- annotation &= ~RXRPC_TX_ANNO_MASK;
- annotation |= RXRPC_TX_ANNO_RETRANS;
- call->rxtx_annotations[ix] = annotation;
- resend = true;
- }
- }
-
- spin_unlock_bh(&call->lock);
-
- if (resend && !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
+ if (after(call->acks_lost_top, call->acks_prev_seq) &&
+ !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
rxrpc_queue_call(call);
}
@@ -736,8 +713,8 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),
rwind, ntohl(ackinfo->jumbo_max));
- if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
- rwind = RXRPC_RXTX_BUFF_SIZE - 1;
+ if (rwind > RXRPC_TX_MAX_WINDOW)
+ rwind = RXRPC_TX_MAX_WINDOW;
if (call->tx_winsize != rwind) {
if (rwind > call->tx_winsize)
wake = true;
@@ -776,40 +753,19 @@ static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks,
rxrpc_seq_t seq, int nr_acks,
struct rxrpc_ack_summary *summary)
{
- int ix;
- u8 annotation, anno_type;
-
- for (; nr_acks > 0; nr_acks--, seq++) {
- ix = seq & RXRPC_RXTX_BUFF_MASK;
- annotation = call->rxtx_annotations[ix];
- anno_type = annotation & RXRPC_TX_ANNO_MASK;
- annotation &= ~RXRPC_TX_ANNO_MASK;
- switch (*acks++) {
- case RXRPC_ACK_TYPE_ACK:
+ unsigned int i;
+
+ for (i = 0; i < nr_acks; i++) {
+ if (acks[i] == RXRPC_ACK_TYPE_ACK) {
summary->nr_acks++;
- if (anno_type == RXRPC_TX_ANNO_ACK)
- continue;
summary->nr_new_acks++;
- call->rxtx_annotations[ix] =
- RXRPC_TX_ANNO_ACK | annotation;
- break;
- case RXRPC_ACK_TYPE_NACK:
- if (!summary->nr_nacks &&
- call->acks_lowest_nak != seq) {
- call->acks_lowest_nak = seq;
+ } else {
+ if (!summary->saw_nacks &&
+ call->acks_lowest_nak != seq + i) {
+ call->acks_lowest_nak = seq + i;
summary->new_low_nack = true;
}
- summary->nr_nacks++;
- if (anno_type == RXRPC_TX_ANNO_NAK)
- continue;
- summary->nr_new_nacks++;
- if (anno_type == RXRPC_TX_ANNO_RETRANS)
- continue;
- call->rxtx_annotations[ix] =
- RXRPC_TX_ANNO_NAK | annotation;
- break;
- default:
- return rxrpc_proto_abort("SFT", call, 0);
+ summary->saw_nacks = true;
}
}
}
@@ -851,12 +807,10 @@ static bool rxrpc_is_ack_valid(struct rxrpc_call *call,
static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
{
struct rxrpc_ack_summary summary = { 0 };
+ struct rxrpc_ackpacket ack;
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- union {
- struct rxrpc_ackpacket ack;
- struct rxrpc_ackinfo info;
- u8 acks[RXRPC_MAXACKS];
- } buf;
+ struct rxrpc_ackinfo info;
+ struct sk_buff *skb_old = NULL, *skb_put = skb;
rxrpc_serial_t ack_serial, acked_serial;
rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt;
int nr_acks, offset, ioffset;
@@ -864,29 +818,28 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
_enter("");
offset = sizeof(struct rxrpc_wire_header);
- if (skb_copy_bits(skb, offset, &buf.ack, sizeof(buf.ack)) < 0) {
- _debug("extraction failure");
- return rxrpc_proto_abort("XAK", call, 0);
+ if (skb_copy_bits(skb, offset, &ack, sizeof(ack)) < 0) {
+ rxrpc_proto_abort("XAK", call, 0);
+ goto out_not_locked;
}
- offset += sizeof(buf.ack);
+ offset += sizeof(ack);
ack_serial = sp->hdr.serial;
- acked_serial = ntohl(buf.ack.serial);
- first_soft_ack = ntohl(buf.ack.firstPacket);
- prev_pkt = ntohl(buf.ack.previousPacket);
+ acked_serial = ntohl(ack.serial);
+ first_soft_ack = ntohl(ack.firstPacket);
+ prev_pkt = ntohl(ack.previousPacket);
hard_ack = first_soft_ack - 1;
- nr_acks = buf.ack.nAcks;
- summary.ack_reason = (buf.ack.reason < RXRPC_ACK__INVALID ?
- buf.ack.reason : RXRPC_ACK__INVALID);
+ nr_acks = ack.nAcks;
+ summary.ack_reason = (ack.reason < RXRPC_ACK__INVALID ?
+ ack.reason : RXRPC_ACK__INVALID);
trace_rxrpc_rx_ack(call, ack_serial, acked_serial,
first_soft_ack, prev_pkt,
summary.ack_reason, nr_acks);
+ rxrpc_inc_stat(call->rxnet, stat_rx_acks[ack.reason]);
- switch (buf.ack.reason) {
+ switch (ack.reason) {
case RXRPC_ACK_PING_RESPONSE:
- rxrpc_input_ping_response(call, skb->tstamp, acked_serial,
- ack_serial);
rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
rxrpc_rtt_rx_ping_response);
break;
@@ -901,22 +854,20 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
break;
}
- if (buf.ack.reason == RXRPC_ACK_PING) {
+ if (ack.reason == RXRPC_ACK_PING) {
_proto("Rx ACK %%%u PING Request", ack_serial);
- rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
- ack_serial, true, true,
- rxrpc_propose_ack_respond_to_ping);
+ rxrpc_send_ACK(call, RXRPC_ACK_PING_RESPONSE, ack_serial,
+ rxrpc_propose_ack_respond_to_ping);
} else if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
- rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED,
- ack_serial, true, true,
- rxrpc_propose_ack_respond_to_ack);
+ rxrpc_send_ACK(call, RXRPC_ACK_REQUESTED, ack_serial,
+ rxrpc_propose_ack_respond_to_ack);
}
/* If we get an EXCEEDS_WINDOW ACK from the server, it probably
* indicates that the client address changed due to NAT. The server
* lost the call because it switched to a different peer.
*/
- if (unlikely(buf.ack.reason == RXRPC_ACK_EXCEEDS_WINDOW) &&
+ if (unlikely(ack.reason == RXRPC_ACK_EXCEEDS_WINDOW) &&
first_soft_ack == 1 &&
prev_pkt == 0 &&
rxrpc_is_client_call(call)) {
@@ -929,10 +880,10 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
* indicate a change of address. However, we can retransmit the call
* if we still have it buffered to the beginning.
*/
- if (unlikely(buf.ack.reason == RXRPC_ACK_OUT_OF_SEQUENCE) &&
+ if (unlikely(ack.reason == RXRPC_ACK_OUT_OF_SEQUENCE) &&
first_soft_ack == 1 &&
prev_pkt == 0 &&
- call->tx_hard_ack == 0 &&
+ call->acks_hard_ack == 0 &&
rxrpc_is_client_call(call)) {
rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
0, -ENETRESET);
@@ -944,14 +895,19 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial,
first_soft_ack, call->acks_first_seq,
prev_pkt, call->acks_prev_seq);
- return;
+ goto out_not_locked;
}
- buf.info.rxMTU = 0;
+ info.rxMTU = 0;
ioffset = offset + nr_acks + 3;
- if (skb->len >= ioffset + sizeof(buf.info) &&
- skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0)
- return rxrpc_proto_abort("XAI", call, 0);
+ if (skb->len >= ioffset + sizeof(info) &&
+ skb_copy_bits(skb, ioffset, &info, sizeof(info)) < 0) {
+ rxrpc_proto_abort("XAI", call, 0);
+ goto out_not_locked;
+ }
+
+ if (nr_acks > 0)
+ skb_condense(skb);
spin_lock(&call->input_lock);
@@ -967,9 +923,22 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
call->acks_first_seq = first_soft_ack;
call->acks_prev_seq = prev_pkt;
+ switch (ack.reason) {
+ case RXRPC_ACK_PING:
+ break;
+ case RXRPC_ACK_PING_RESPONSE:
+ rxrpc_input_ping_response(call, skb->tstamp, acked_serial,
+ ack_serial);
+ fallthrough;
+ default:
+ if (after(acked_serial, call->acks_highest_serial))
+ call->acks_highest_serial = acked_serial;
+ break;
+ }
+
/* Parse rwind and mtu sizes if provided. */
- if (buf.info.rxMTU)
- rxrpc_input_ackinfo(call, skb, &buf.info);
+ if (info.rxMTU)
+ rxrpc_input_ackinfo(call, skb, &info);
if (first_soft_ack == 0) {
rxrpc_proto_abort("AK0", call, 0);
@@ -987,7 +956,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
goto out;
}
- if (before(hard_ack, call->tx_hard_ack) ||
+ if (before(hard_ack, call->acks_hard_ack) ||
after(hard_ack, call->tx_top)) {
rxrpc_proto_abort("AKW", call, 0);
goto out;
@@ -997,7 +966,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
goto out;
}
- if (after(hard_ack, call->tx_hard_ack)) {
+ if (after(hard_ack, call->acks_hard_ack)) {
if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) {
rxrpc_end_tx_phase(call, false, "ETA");
goto out;
@@ -1005,25 +974,38 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
}
if (nr_acks > 0) {
- if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0) {
+ if (offset > (int)skb->len - nr_acks) {
rxrpc_proto_abort("XSA", call, 0);
goto out;
}
- rxrpc_input_soft_acks(call, buf.acks, first_soft_ack, nr_acks,
- &summary);
+
+ spin_lock(&call->acks_ack_lock);
+ skb_old = call->acks_soft_tbl;
+ call->acks_soft_tbl = skb;
+ spin_unlock(&call->acks_ack_lock);
+
+ rxrpc_input_soft_acks(call, skb->data + offset, first_soft_ack,
+ nr_acks, &summary);
+ skb_put = NULL;
+ } else if (call->acks_soft_tbl) {
+ spin_lock(&call->acks_ack_lock);
+ skb_old = call->acks_soft_tbl;
+ call->acks_soft_tbl = NULL;
+ spin_unlock(&call->acks_ack_lock);
}
- if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
- RXRPC_TX_ANNO_LAST &&
+ if (test_bit(RXRPC_CALL_TX_LAST, &call->flags) &&
summary.nr_acks == call->tx_top - hard_ack &&
rxrpc_is_client_call(call))
- rxrpc_propose_ACK(call, RXRPC_ACK_PING, ack_serial,
- false, true,
- rxrpc_propose_ack_ping_for_lost_reply);
+ rxrpc_propose_ping(call, ack_serial,
+ rxrpc_propose_ack_ping_for_lost_reply);
rxrpc_congestion_management(call, skb, &summary, acked_serial);
out:
spin_unlock(&call->input_lock);
+out_not_locked:
+ rxrpc_free_skb(skb_put, rxrpc_skb_freed);
+ rxrpc_free_skb(skb_old, rxrpc_skb_freed);
}
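
The offset arithmetic in rxrpc_input_ack() assumes the usual ACK packet layout: wire header, then the rxrpc_ackpacket struct, then nAcks single-byte soft-ACK/NACK entries, three pad bytes, and finally the optional rxrpc_ackinfo trailer if the packet is long enough to hold it. A small sketch of that offset calculation (the helper is illustrative, not part of the patch):

static inline unsigned int rxrpc_ackinfo_offset(unsigned int nr_acks)
{
        /* Mirrors: offset = wire header + ackpacket; ioffset = offset + nAcks + 3 */
        return sizeof(struct rxrpc_wire_header) +
               sizeof(struct rxrpc_ackpacket) + nr_acks + 3;
}
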
/*
@@ -1096,7 +1078,7 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
case RXRPC_PACKET_TYPE_ACK:
rxrpc_input_ack(call, skb);
- break;
+ goto no_free;
case RXRPC_PACKET_TYPE_BUSY:
_proto("Rx BUSY %%%u", sp->hdr.serial);
@@ -1307,8 +1289,6 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
if (sp->hdr.callNumber == 0 ||
sp->hdr.seq == 0)
goto bad_message;
- if (!rxrpc_validate_data(skb))
- goto bad_message;
/* Unshare the packet so that it can be modified for in-place
* decryption.
@@ -1422,7 +1402,7 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
trace_rxrpc_rx_data(chan->call_debug_id,
sp->hdr.seq,
sp->hdr.serial,
- sp->hdr.flags, 0);
+ sp->hdr.flags);
rxrpc_post_packet_to_conn(conn, skb);
goto out;
}
diff --git a/net/rxrpc/insecure.c b/net/rxrpc/insecure.c
index 9aae99d67833..0eb8471bfc53 100644
--- a/net/rxrpc/insecure.c
+++ b/net/rxrpc/insecure.c
@@ -25,16 +25,16 @@ static int none_how_much_data(struct rxrpc_call *call, size_t remain,
return 0;
}
-static int none_secure_packet(struct rxrpc_call *call, struct sk_buff *skb,
- size_t data_size)
+static int none_secure_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
{
return 0;
}
-static int none_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
- unsigned int offset, unsigned int len,
- rxrpc_seq_t seq, u16 expected_cksum)
+static int none_verify_packet(struct rxrpc_call *call, struct sk_buff *skb)
{
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+
+ sp->flags |= RXRPC_RX_VERIFIED;
return 0;
}
@@ -42,11 +42,6 @@ static void none_free_call_crypto(struct rxrpc_call *call)
{
}
-static void none_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
- unsigned int *_offset, unsigned int *_len)
-{
-}
-
static int none_respond_to_challenge(struct rxrpc_connection *conn,
struct sk_buff *skb,
u32 *_abort_code)
@@ -95,7 +90,6 @@ const struct rxrpc_security rxrpc_no_security = {
.how_much_data = none_how_much_data,
.secure_packet = none_secure_packet,
.verify_packet = none_verify_packet,
- .locate_data = none_locate_data,
.respond_to_challenge = none_respond_to_challenge,
.verify_response = none_verify_response,
.clear = none_clear,
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 38ea98ff426b..a178f71e5082 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -24,6 +24,19 @@ static void rxrpc_local_processor(struct work_struct *);
static void rxrpc_local_rcu(struct rcu_head *);
/*
+ * Handle an ICMP/ICMP6 error turning up at the tunnel. Push it through the
+ * usual mechanism so that it gets parsed and presented through the UDP
+ * socket's error_report().
+ */
+static void rxrpc_encap_err_rcv(struct sock *sk, struct sk_buff *skb, int err,
+ __be16 port, u32 info, u8 *payload)
+{
+ if (ip_hdr(skb)->version == IPVERSION)
+ return ip_icmp_error(sk, skb, err, port, info, payload);
+ return ipv6_icmp_error(sk, skb, err, port, info, payload);
+}
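
rxrpc_encap_err_rcv() above only takes effect once it is installed as the UDP tunnel error callback on the transport socket; that wiring is outside this hunk. A sketch of what it could look like with the udp_tunnel API, where the setup function and the choice of rxrpc_input_packet as the receive hook are assumptions, not taken from this patch:

static void rxrpc_setup_udp_tunnel(struct socket *usk, struct rxrpc_local *local)
{
        struct udp_tunnel_sock_cfg tuncfg = {
                .sk_user_data  = local,
                .encap_type    = UDP_ENCAP_RXRPC,
                .encap_rcv     = rxrpc_input_packet,   /* assumed receive hook */
                .encap_err_rcv = rxrpc_encap_err_rcv,
        };

        setup_udp_tunnel_sock(sock_net(usk->sk), usk, &tuncfg);
}
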
+
+/*
* Compare a local to an address. Return -ve, 0 or +ve to indicate less than,
* same or greater than.
*
@@ -84,6 +97,8 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
local->rxnet = rxnet;
INIT_HLIST_NODE(&local->link);
INIT_WORK(&local->processor, rxrpc_local_processor);
+ INIT_LIST_HEAD(&local->ack_tx_queue);
+ spin_lock_init(&local->ack_tx_lock);
init_rwsem(&local->defrag_sem);
skb_queue_head_init(&local->reject_queue);
skb_queue_head_init(&local->event_queue);
@@ -419,6 +434,11 @@ static void rxrpc_local_processor(struct work_struct *work)
break;
}
+ if (!list_empty(&local->ack_tx_queue)) {
+ rxrpc_transmit_ack_packets(local);
+ again = true;
+ }
+
if (!skb_queue_empty(&local->reject_queue)) {
rxrpc_reject_packets(local);
again = true;
diff --git a/net/rxrpc/misc.c b/net/rxrpc/misc.c
index d4144fd86f84..056c428d8bf3 100644
--- a/net/rxrpc/misc.c
+++ b/net/rxrpc/misc.c
@@ -17,12 +17,6 @@
unsigned int rxrpc_max_backlog __read_mostly = 10;
/*
- * How long to wait before scheduling ACK generation after seeing a
- * packet with RXRPC_REQUEST_ACK set (in jiffies).
- */
-unsigned long rxrpc_requested_ack_delay = 1;
-
-/*
* How long to wait before scheduling an ACK with subtype DELAY (in jiffies).
*
* We use this when we've received new data packets. If those packets aren't
@@ -46,10 +40,7 @@ unsigned long rxrpc_idle_ack_delay = HZ / 2;
* limit is hit, we should generate an EXCEEDS_WINDOW ACK and discard further
* packets.
*/
-unsigned int rxrpc_rx_window_size = RXRPC_INIT_RX_WINDOW_SIZE;
-#if (RXRPC_RXTX_BUFF_SIZE - 1) < RXRPC_INIT_RX_WINDOW_SIZE
-#error Need to reduce RXRPC_INIT_RX_WINDOW_SIZE
-#endif
+unsigned int rxrpc_rx_window_size = 255;
/*
* Maximum Rx MTU size. This indicates to the sender the size of jumbo packet
@@ -62,15 +53,3 @@ unsigned int rxrpc_rx_mtu = 5692;
* sender that we're willing to handle.
*/
unsigned int rxrpc_rx_jumbo_max = 4;
-
-const s8 rxrpc_ack_priority[] = {
- [0] = 0,
- [RXRPC_ACK_DELAY] = 1,
- [RXRPC_ACK_REQUESTED] = 2,
- [RXRPC_ACK_IDLE] = 3,
- [RXRPC_ACK_DUPLICATE] = 4,
- [RXRPC_ACK_OUT_OF_SEQUENCE] = 5,
- [RXRPC_ACK_EXCEEDS_WINDOW] = 6,
- [RXRPC_ACK_NOSPACE] = 7,
- [RXRPC_ACK_PING_RESPONSE] = 8,
-};
diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
index bb4c25d6df64..84242c0e467c 100644
--- a/net/rxrpc/net_ns.c
+++ b/net/rxrpc/net_ns.c
@@ -101,6 +101,8 @@ static __net_init int rxrpc_init_net(struct net *net)
proc_create_net("locals", 0444, rxnet->proc_net,
&rxrpc_local_seq_ops,
sizeof(struct seq_net_private));
+ proc_create_net_single_write("stats", S_IFREG | 0644, rxnet->proc_net,
+ rxrpc_stats_show, rxrpc_stats_clear, NULL);
return 0;
err_proc:
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 9683617db704..46432e70a16b 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -13,15 +13,21 @@
#include <linux/export.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
+#include <net/udp.h>
#include "ar-internal.h"
-struct rxrpc_ack_buffer {
- struct rxrpc_wire_header whdr;
- struct rxrpc_ackpacket ack;
- u8 acks[255];
- u8 pad[3];
- struct rxrpc_ackinfo ackinfo;
-};
+extern int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
+
+static ssize_t do_udp_sendmsg(struct socket *sk, struct msghdr *msg, size_t len)
+{
+#if IS_ENABLED(CONFIG_AF_RXRPC_IPV6)
+ struct sockaddr *sa = msg->msg_name;
+
+ if (sa->sa_family == AF_INET6)
+ return udpv6_sendmsg(sk->sk, msg, len);
+#endif
+ return udp_sendmsg(sk->sk, msg, len);
+}
struct rxrpc_abort_buffer {
struct rxrpc_wire_header whdr;
@@ -68,66 +74,83 @@ static void rxrpc_set_keepalive(struct rxrpc_call *call)
*/
static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
struct rxrpc_call *call,
- struct rxrpc_ack_buffer *pkt,
- rxrpc_seq_t *_hard_ack,
- rxrpc_seq_t *_top,
- u8 reason)
+ struct rxrpc_txbuf *txb)
{
- rxrpc_serial_t serial;
- unsigned int tmp;
- rxrpc_seq_t hard_ack, top, seq;
- int ix;
+ struct rxrpc_ackinfo ackinfo;
+ unsigned int qsize;
+ rxrpc_seq_t window, wtop, wrap_point, ix, first;
+ int rsize;
+ u64 wtmp;
u32 mtu, jmax;
- u8 *ackp = pkt->acks;
+ u8 *ackp = txb->acks;
+ u8 sack_buffer[sizeof(call->ackr_sack_table)] __aligned(8);
- tmp = atomic_xchg(&call->ackr_nr_unacked, 0);
- tmp |= atomic_xchg(&call->ackr_nr_consumed, 0);
- if (!tmp && (reason == RXRPC_ACK_DELAY ||
- reason == RXRPC_ACK_IDLE))
- return 0;
+ atomic_set(&call->ackr_nr_unacked, 0);
+ atomic_set(&call->ackr_nr_consumed, 0);
+ rxrpc_inc_stat(call->rxnet, stat_tx_ack_fill);
/* Barrier against rxrpc_input_data(). */
- serial = call->ackr_serial;
- hard_ack = READ_ONCE(call->rx_hard_ack);
- top = smp_load_acquire(&call->rx_top);
- *_hard_ack = hard_ack;
- *_top = top;
-
- pkt->ack.bufferSpace = htons(8);
- pkt->ack.maxSkew = htons(0);
- pkt->ack.firstPacket = htonl(hard_ack + 1);
- pkt->ack.previousPacket = htonl(call->ackr_highest_seq);
- pkt->ack.serial = htonl(serial);
- pkt->ack.reason = reason;
- pkt->ack.nAcks = top - hard_ack;
-
- if (reason == RXRPC_ACK_PING)
- pkt->whdr.flags |= RXRPC_REQUEST_ACK;
-
- if (after(top, hard_ack)) {
- seq = hard_ack + 1;
- do {
- ix = seq & RXRPC_RXTX_BUFF_MASK;
- if (call->rxtx_buffer[ix])
- *ackp++ = RXRPC_ACK_TYPE_ACK;
- else
- *ackp++ = RXRPC_ACK_TYPE_NACK;
- seq++;
- } while (before_eq(seq, top));
+retry:
+ wtmp = atomic64_read_acquire(&call->ackr_window);
+ window = lower_32_bits(wtmp);
+ wtop = upper_32_bits(wtmp);
+ txb->ack.firstPacket = htonl(window);
+ txb->ack.nAcks = 0;
+
+ if (after(wtop, window)) {
+ /* Try to copy the SACK ring locklessly. We can use the copy,
+ * only if the now-current top of the window didn't go past the
+ * previously read base - otherwise we can't know whether we
+ * have old data or new data.
+ */
+ memcpy(sack_buffer, call->ackr_sack_table, sizeof(sack_buffer));
+ wrap_point = window + RXRPC_SACK_SIZE - 1;
+ wtmp = atomic64_read_acquire(&call->ackr_window);
+ window = lower_32_bits(wtmp);
+ wtop = upper_32_bits(wtmp);
+ if (after(wtop, wrap_point)) {
+ cond_resched();
+ goto retry;
+ }
+
+ /* The buffer is maintained as a ring with an invariant mapping
+ * between bit position and sequence number, so we'll probably
+ * need to rotate it.
+ */
+ txb->ack.nAcks = wtop - window;
+ ix = window % RXRPC_SACK_SIZE;
+ first = sizeof(sack_buffer) - ix;
+
+ if (ix + txb->ack.nAcks <= RXRPC_SACK_SIZE) {
+ memcpy(txb->acks, sack_buffer + ix, txb->ack.nAcks);
+ } else {
+ memcpy(txb->acks, sack_buffer + ix, first);
+ memcpy(txb->acks + first, sack_buffer,
+ txb->ack.nAcks - first);
+ }
+
+ ackp += txb->ack.nAcks;
+ } else if (before(wtop, window)) {
+ pr_warn("ack window backward %x %x\n", window, wtop);
+ } else if (txb->ack.reason == RXRPC_ACK_DELAY) {
+ txb->ack.reason = RXRPC_ACK_IDLE;
}
mtu = conn->params.peer->if_mtu;
mtu -= conn->params.peer->hdrsize;
- jmax = (call->nr_jumbo_bad > 3) ? 1 : rxrpc_rx_jumbo_max;
- pkt->ackinfo.rxMTU = htonl(rxrpc_rx_mtu);
- pkt->ackinfo.maxMTU = htonl(mtu);
- pkt->ackinfo.rwind = htonl(call->rx_winsize);
- pkt->ackinfo.jumbo_max = htonl(jmax);
+ jmax = rxrpc_rx_jumbo_max;
+ qsize = (window - 1) - call->rx_consumed;
+ rsize = max_t(int, call->rx_winsize - qsize, 0);
+ ackinfo.rxMTU = htonl(rxrpc_rx_mtu);
+ ackinfo.maxMTU = htonl(mtu);
+ ackinfo.rwind = htonl(rsize);
+ ackinfo.jumbo_max = htonl(jmax);
*ackp++ = 0;
*ackp++ = 0;
*ackp++ = 0;
- return top - hard_ack + 3;
+ memcpy(ackp, &ackinfo, sizeof(ackinfo));
+ return txb->ack.nAcks + 3 + sizeof(ackinfo);
}
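
The two-part memcpy above is a plain ring-to-linear rotation. A standalone sketch of the same operation, assuming the ring is indexed by sequence number modulo RXRPC_SACK_SIZE and that nr never exceeds the ring size:

static void sack_ring_to_linear(const u8 *ring, u8 *out, u32 window, unsigned int nr)
{
        unsigned int ix = window % RXRPC_SACK_SIZE;
        unsigned int first = RXRPC_SACK_SIZE - ix;      /* bytes up to the wrap point */

        if (ix + nr <= RXRPC_SACK_SIZE) {
                memcpy(out, ring + ix, nr);             /* contiguous run */
        } else {
                memcpy(out, ring + ix, first);          /* tail of the ring... */
                memcpy(out + first, ring, nr - first);  /* ...then the wrapped head */
        }
}
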
/*
@@ -176,26 +199,20 @@ static void rxrpc_cancel_rtt_probe(struct rxrpc_call *call,
/*
* Send an ACK call packet.
*/
-int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
- rxrpc_serial_t *_serial)
+static int rxrpc_send_ack_packet(struct rxrpc_local *local, struct rxrpc_txbuf *txb)
{
struct rxrpc_connection *conn;
struct rxrpc_ack_buffer *pkt;
+ struct rxrpc_call *call = txb->call;
struct msghdr msg;
- struct kvec iov[2];
+ struct kvec iov[1];
rxrpc_serial_t serial;
- rxrpc_seq_t hard_ack, top;
size_t len, n;
int ret, rtt_slot = -1;
- u8 reason;
if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
return -ECONNRESET;
- pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
- if (!pkt)
- return -ENOMEM;
-
conn = call->conn;
msg.msg_name = &call->peer->srx.transport;
@@ -204,83 +221,98 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
msg.msg_controllen = 0;
msg.msg_flags = 0;
- pkt->whdr.epoch = htonl(conn->proto.epoch);
- pkt->whdr.cid = htonl(call->cid);
- pkt->whdr.callNumber = htonl(call->call_id);
- pkt->whdr.seq = 0;
- pkt->whdr.type = RXRPC_PACKET_TYPE_ACK;
- pkt->whdr.flags = RXRPC_SLOW_START_OK | conn->out_clientflag;
- pkt->whdr.userStatus = 0;
- pkt->whdr.securityIndex = call->security_ix;
- pkt->whdr._rsvd = 0;
- pkt->whdr.serviceId = htons(call->service_id);
-
- spin_lock_bh(&call->lock);
- if (ping) {
- reason = RXRPC_ACK_PING;
- } else {
- reason = call->ackr_reason;
- if (!call->ackr_reason) {
- spin_unlock_bh(&call->lock);
- ret = 0;
- goto out;
- }
- call->ackr_reason = 0;
- }
- n = rxrpc_fill_out_ack(conn, call, pkt, &hard_ack, &top, reason);
+ if (txb->ack.reason == RXRPC_ACK_PING)
+ txb->wire.flags |= RXRPC_REQUEST_ACK;
- spin_unlock_bh(&call->lock);
- if (n == 0) {
- kfree(pkt);
+ if (txb->ack.reason == RXRPC_ACK_DELAY)
+ clear_bit(RXRPC_CALL_DELAY_ACK_PENDING, &call->flags);
+ if (txb->ack.reason == RXRPC_ACK_IDLE)
+ clear_bit(RXRPC_CALL_IDLE_ACK_PENDING, &call->flags);
+
+ n = rxrpc_fill_out_ack(conn, call, txb);
+ if (n == 0)
return 0;
- }
- iov[0].iov_base = pkt;
- iov[0].iov_len = sizeof(pkt->whdr) + sizeof(pkt->ack) + n;
- iov[1].iov_base = &pkt->ackinfo;
- iov[1].iov_len = sizeof(pkt->ackinfo);
- len = iov[0].iov_len + iov[1].iov_len;
+ iov[0].iov_base = &txb->wire;
+ iov[0].iov_len = sizeof(txb->wire) + sizeof(txb->ack) + n;
+ len = iov[0].iov_len;
serial = atomic_inc_return(&conn->serial);
- pkt->whdr.serial = htonl(serial);
+ txb->wire.serial = htonl(serial);
trace_rxrpc_tx_ack(call->debug_id, serial,
- ntohl(pkt->ack.firstPacket),
- ntohl(pkt->ack.serial),
- pkt->ack.reason, pkt->ack.nAcks);
- if (_serial)
- *_serial = serial;
+ ntohl(txb->ack.firstPacket),
+ ntohl(txb->ack.serial), txb->ack.reason, txb->ack.nAcks);
+ if (txb->ack_why == rxrpc_propose_ack_ping_for_lost_ack)
+ call->acks_lost_ping = serial;
- if (ping)
+ if (txb->ack.reason == RXRPC_ACK_PING)
rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_ping);
- ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
- conn->params.peer->last_tx_at = ktime_get_seconds();
+ rxrpc_inc_stat(call->rxnet, stat_tx_ack_send);
+
+ /* Grab the highest received seq as late as possible */
+ txb->ack.previousPacket = htonl(call->rx_highest_seq);
+
+ iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, len);
+ ret = do_udp_sendmsg(conn->params.local->socket, &msg, len);
+ call->peer->last_tx_at = ktime_get_seconds();
if (ret < 0)
trace_rxrpc_tx_fail(call->debug_id, serial, ret,
rxrpc_tx_point_call_ack);
else
- trace_rxrpc_tx_packet(call->debug_id, &pkt->whdr,
+ trace_rxrpc_tx_packet(call->debug_id, &txb->wire,
rxrpc_tx_point_call_ack);
rxrpc_tx_backoff(call, ret);
if (call->state < RXRPC_CALL_COMPLETE) {
- if (ret < 0) {
+ if (ret < 0)
rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
- rxrpc_propose_ACK(call, pkt->ack.reason,
- ntohl(pkt->ack.serial),
- false, true,
- rxrpc_propose_ack_retry_tx);
- }
-
rxrpc_set_keepalive(call);
}
-out:
kfree(pkt);
return ret;
}
/*
+ * ACK transmitter for a local endpoint. The UDP socket locks around each
+ * transmission, so we can only transmit one packet at a time, ACK, DATA or
+ * otherwise.
+ */
+void rxrpc_transmit_ack_packets(struct rxrpc_local *local)
+{
+ LIST_HEAD(queue);
+ int ret;
+
+ trace_rxrpc_local(local->debug_id, rxrpc_local_tx_ack,
+ refcount_read(&local->ref), NULL);
+
+ if (list_empty(&local->ack_tx_queue))
+ return;
+
+ spin_lock_bh(&local->ack_tx_lock);
+ list_splice_tail_init(&local->ack_tx_queue, &queue);
+ spin_unlock_bh(&local->ack_tx_lock);
+
+ while (!list_empty(&queue)) {
+ struct rxrpc_txbuf *txb =
+ list_entry(queue.next, struct rxrpc_txbuf, tx_link);
+
+ ret = rxrpc_send_ack_packet(local, txb);
+ if (ret < 0 && ret != -ECONNRESET) {
+ spin_lock_bh(&local->ack_tx_lock);
+ list_splice_init(&queue, &local->ack_tx_queue);
+ spin_unlock_bh(&local->ack_tx_lock);
+ break;
+ }
+
+ list_del_init(&txb->tx_link);
+ rxrpc_put_call(txb->call, rxrpc_call_put);
+ rxrpc_put_txbuf(txb, rxrpc_txbuf_put_ack_tx);
+ }
+}
+
+/*
* Send an ABORT call packet.
*/
int rxrpc_send_abort_packet(struct rxrpc_call *call)
@@ -299,7 +331,7 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
* channel instead, thereby closing off this call.
*/
if (rxrpc_is_client_call(call) &&
- test_bit(RXRPC_CALL_TX_LAST, &call->flags))
+ test_bit(RXRPC_CALL_TX_ALL_ACKED, &call->flags))
return 0;
if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
@@ -331,8 +363,8 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
serial = atomic_inc_return(&conn->serial);
pkt.whdr.serial = htonl(serial);
- ret = kernel_sendmsg(conn->params.local->socket,
- &msg, iov, 1, sizeof(pkt));
+ iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, sizeof(pkt));
+ ret = do_udp_sendmsg(conn->params.local->socket, &msg, sizeof(pkt));
conn->params.peer->last_tx_at = ktime_get_seconds();
if (ret < 0)
trace_rxrpc_tx_fail(call->debug_id, serial, ret,
@@ -347,19 +379,17 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
/*
* send a packet through the transport endpoint
*/
-int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
- bool retrans)
+int rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
{
+ enum rxrpc_req_ack_trace why;
struct rxrpc_connection *conn = call->conn;
- struct rxrpc_wire_header whdr;
- struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct msghdr msg;
- struct kvec iov[2];
+ struct kvec iov[1];
rxrpc_serial_t serial;
size_t len;
int ret, rtt_slot = -1;
- _enter(",{%d}", skb->len);
+ _enter("%x,{%d}", txb->seq, txb->len);
if (hlist_unhashed(&call->error_link)) {
spin_lock_bh(&call->peer->lock);
@@ -369,28 +399,16 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
/* Each transmission of a Tx packet needs a new serial number */
serial = atomic_inc_return(&conn->serial);
-
- whdr.epoch = htonl(conn->proto.epoch);
- whdr.cid = htonl(call->cid);
- whdr.callNumber = htonl(call->call_id);
- whdr.seq = htonl(sp->hdr.seq);
- whdr.serial = htonl(serial);
- whdr.type = RXRPC_PACKET_TYPE_DATA;
- whdr.flags = sp->hdr.flags;
- whdr.userStatus = 0;
- whdr.securityIndex = call->security_ix;
- whdr._rsvd = htons(sp->hdr._rsvd);
- whdr.serviceId = htons(call->service_id);
+ txb->wire.serial = htonl(serial);
if (test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags) &&
- sp->hdr.seq == 1)
- whdr.userStatus = RXRPC_USERSTATUS_SERVICE_UPGRADE;
+ txb->seq == 1)
+ txb->wire.userStatus = RXRPC_USERSTATUS_SERVICE_UPGRADE;
- iov[0].iov_base = &whdr;
- iov[0].iov_len = sizeof(whdr);
- iov[1].iov_base = skb->head;
- iov[1].iov_len = skb->len;
- len = iov[0].iov_len + iov[1].iov_len;
+ iov[0].iov_base = &txb->wire;
+ iov[0].iov_len = sizeof(txb->wire) + txb->len;
+ len = iov[0].iov_len;
+ iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, len);
msg.msg_name = &call->peer->srx.transport;
msg.msg_namelen = call->peer->srx.transport_len;
@@ -405,41 +423,56 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
* service call, lest OpenAFS incorrectly send us an ACK with some
* soft-ACKs in it and then never follow up with a proper hard ACK.
*/
- if ((!(sp->hdr.flags & RXRPC_LAST_PACKET) ||
- rxrpc_to_server(sp)
- ) &&
- (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) ||
- retrans ||
- call->cong_mode == RXRPC_CALL_SLOW_START ||
- (call->peer->rtt_count < 3 && sp->hdr.seq & 1) ||
- ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
- ktime_get_real())))
- whdr.flags |= RXRPC_REQUEST_ACK;
+ if (txb->wire.flags & RXRPC_REQUEST_ACK)
+ why = rxrpc_reqack_already_on;
+ else if (test_bit(RXRPC_TXBUF_LAST, &txb->flags) && rxrpc_sending_to_client(txb))
+ why = rxrpc_reqack_no_srv_last;
+ else if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events))
+ why = rxrpc_reqack_ack_lost;
+ else if (test_bit(RXRPC_TXBUF_RESENT, &txb->flags))
+ why = rxrpc_reqack_retrans;
+ else if (call->cong_mode == RXRPC_CALL_SLOW_START && call->cong_cwnd <= 2)
+ why = rxrpc_reqack_slow_start;
+ else if (call->tx_winsize <= 2)
+ why = rxrpc_reqack_small_txwin;
+ else if (call->peer->rtt_count < 3 && txb->seq & 1)
+ why = rxrpc_reqack_more_rtt;
+ else if (ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), ktime_get_real()))
+ why = rxrpc_reqack_old_rtt;
+ else
+ goto dont_set_request_ack;
+
+ rxrpc_inc_stat(call->rxnet, stat_why_req_ack[why]);
+ trace_rxrpc_req_ack(call->debug_id, txb->seq, why);
+ if (why != rxrpc_reqack_no_srv_last)
+ txb->wire.flags |= RXRPC_REQUEST_ACK;
+dont_set_request_ack:
if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
static int lose;
if ((lose++ & 7) == 7) {
ret = 0;
- trace_rxrpc_tx_data(call, sp->hdr.seq, serial,
- whdr.flags, retrans, true);
+ trace_rxrpc_tx_data(call, txb->seq, serial,
+ txb->wire.flags,
+ test_bit(RXRPC_TXBUF_RESENT, &txb->flags),
+ true);
goto done;
}
}
- trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags, retrans,
- false);
+ trace_rxrpc_tx_data(call, txb->seq, serial, txb->wire.flags,
+ test_bit(RXRPC_TXBUF_RESENT, &txb->flags), false);
+ cmpxchg(&call->tx_transmitted, txb->seq - 1, txb->seq);
/* send the packet with the don't fragment bit set if we currently
* think it's small enough */
- if (iov[1].iov_len >= call->peer->maxdata)
+ if (txb->len >= call->peer->maxdata)
goto send_fragmentable;
down_read(&conn->params.local->defrag_sem);
- sp->hdr.serial = serial;
- smp_wmb(); /* Set serial before timestamp */
- skb->tstamp = ktime_get_real();
- if (whdr.flags & RXRPC_REQUEST_ACK)
+ txb->last_sent = ktime_get_real();
+ if (txb->wire.flags & RXRPC_REQUEST_ACK)
rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data);
/* send the packet by UDP
@@ -448,7 +481,8 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
* - in which case, we'll have processed the ICMP error
* message and update the peer record
*/
- ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
+ rxrpc_inc_stat(call->rxnet, stat_tx_data_send);
+ ret = do_udp_sendmsg(conn->params.local->socket, &msg, len);
conn->params.peer->last_tx_at = ktime_get_seconds();
up_read(&conn->params.local->defrag_sem);
@@ -457,7 +491,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
trace_rxrpc_tx_fail(call->debug_id, serial, ret,
rxrpc_tx_point_call_data_nofrag);
} else {
- trace_rxrpc_tx_packet(call->debug_id, &whdr,
+ trace_rxrpc_tx_packet(call->debug_id, &txb->wire,
rxrpc_tx_point_call_data_nofrag);
}
@@ -467,8 +501,9 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
done:
if (ret >= 0) {
- if (whdr.flags & RXRPC_REQUEST_ACK) {
- call->peer->rtt_last_req = skb->tstamp;
+ call->tx_last_sent = txb->last_sent;
+ if (txb->wire.flags & RXRPC_REQUEST_ACK) {
+ call->peer->rtt_last_req = txb->last_sent;
if (call->peer->rtt_count > 1) {
unsigned long nowj = jiffies, ack_lost_at;
@@ -480,7 +515,7 @@ done:
}
}
- if (sp->hdr.seq == 1 &&
+ if (txb->seq == 1 &&
!test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER,
&call->flags)) {
unsigned long nowj = jiffies, expect_rx_by;
@@ -512,23 +547,21 @@ send_fragmentable:
down_write(&conn->params.local->defrag_sem);
- sp->hdr.serial = serial;
- smp_wmb(); /* Set serial before timestamp */
- skb->tstamp = ktime_get_real();
- if (whdr.flags & RXRPC_REQUEST_ACK)
+ txb->last_sent = ktime_get_real();
+ if (txb->wire.flags & RXRPC_REQUEST_ACK)
rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data);
switch (conn->params.local->srx.transport.family) {
case AF_INET6:
case AF_INET:
ip_sock_set_mtu_discover(conn->params.local->socket->sk,
- IP_PMTUDISC_DONT);
- ret = kernel_sendmsg(conn->params.local->socket, &msg,
- iov, 2, len);
+ IP_PMTUDISC_DONT);
+ rxrpc_inc_stat(call->rxnet, stat_tx_data_send_frag);
+ ret = do_udp_sendmsg(conn->params.local->socket, &msg, len);
conn->params.peer->last_tx_at = ktime_get_seconds();
ip_sock_set_mtu_discover(conn->params.local->socket->sk,
- IP_PMTUDISC_DO);
+ IP_PMTUDISC_DO);
break;
default:
@@ -540,7 +573,7 @@ send_fragmentable:
trace_rxrpc_tx_fail(call->debug_id, serial, ret,
rxrpc_tx_point_call_data_frag);
} else {
- trace_rxrpc_tx_packet(call->debug_id, &whdr,
+ trace_rxrpc_tx_packet(call->debug_id, &txb->wire,
rxrpc_tx_point_call_data_frag);
}
rxrpc_tx_backoff(call, ret);
@@ -610,8 +643,8 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
whdr.flags ^= RXRPC_CLIENT_INITIATED;
whdr.flags &= RXRPC_CLIENT_INITIATED;
- ret = kernel_sendmsg(local->socket, &msg,
- iov, ioc, size);
+ iov_iter_kvec(&msg.msg_iter, WRITE, iov, ioc, size);
+ ret = do_udp_sendmsg(local->socket, &msg, size);
if (ret < 0)
trace_rxrpc_tx_fail(local->debug_id, 0, ret,
rxrpc_tx_point_reject);
@@ -666,7 +699,8 @@ void rxrpc_send_keepalive(struct rxrpc_peer *peer)
_proto("Tx VERSION (keepalive)");
- ret = kernel_sendmsg(peer->local->socket, &msg, iov, 2, len);
+ iov_iter_kvec(&msg.msg_iter, WRITE, iov, 2, len);
+ ret = do_udp_sendmsg(peer->local->socket, &msg, len);
if (ret < 0)
trace_rxrpc_tx_fail(peer->debug_id, 0, ret,
rxrpc_tx_point_version_keepalive);
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 32561e9567fe..cda3890657a9 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -16,258 +16,13 @@
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
-#include <net/icmp.h>
#include "ar-internal.h"
-static void rxrpc_adjust_mtu(struct rxrpc_peer *, unsigned int);
static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *);
static void rxrpc_distribute_error(struct rxrpc_peer *, int,
enum rxrpc_call_completion);
/*
- * Find the peer associated with an ICMPv4 packet.
- */
-static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
- struct sk_buff *skb,
- unsigned int udp_offset,
- unsigned int *info,
- struct sockaddr_rxrpc *srx)
-{
- struct iphdr *ip, *ip0 = ip_hdr(skb);
- struct icmphdr *icmp = icmp_hdr(skb);
- struct udphdr *udp = (struct udphdr *)(skb->data + udp_offset);
-
- _enter("%u,%u,%u", ip0->protocol, icmp->type, icmp->code);
-
- switch (icmp->type) {
- case ICMP_DEST_UNREACH:
- *info = ntohs(icmp->un.frag.mtu);
- fallthrough;
- case ICMP_TIME_EXCEEDED:
- case ICMP_PARAMETERPROB:
- ip = (struct iphdr *)((void *)icmp + 8);
- break;
- default:
- return NULL;
- }
-
- memset(srx, 0, sizeof(*srx));
- srx->transport_type = local->srx.transport_type;
- srx->transport_len = local->srx.transport_len;
- srx->transport.family = local->srx.transport.family;
-
- /* Can we see an ICMP4 packet on an ICMP6 listening socket? and vice
- * versa?
- */
- switch (srx->transport.family) {
- case AF_INET:
- srx->transport_len = sizeof(srx->transport.sin);
- srx->transport.family = AF_INET;
- srx->transport.sin.sin_port = udp->dest;
- memcpy(&srx->transport.sin.sin_addr, &ip->daddr,
- sizeof(struct in_addr));
- break;
-
-#ifdef CONFIG_AF_RXRPC_IPV6
- case AF_INET6:
- srx->transport_len = sizeof(srx->transport.sin);
- srx->transport.family = AF_INET;
- srx->transport.sin.sin_port = udp->dest;
- memcpy(&srx->transport.sin.sin_addr, &ip->daddr,
- sizeof(struct in_addr));
- break;
-#endif
-
- default:
- WARN_ON_ONCE(1);
- return NULL;
- }
-
- _net("ICMP {%pISp}", &srx->transport);
- return rxrpc_lookup_peer_rcu(local, srx);
-}
-
-#ifdef CONFIG_AF_RXRPC_IPV6
-/*
- * Find the peer associated with an ICMPv6 packet.
- */
-static struct rxrpc_peer *rxrpc_lookup_peer_icmp6_rcu(struct rxrpc_local *local,
- struct sk_buff *skb,
- unsigned int udp_offset,
- unsigned int *info,
- struct sockaddr_rxrpc *srx)
-{
- struct icmp6hdr *icmp = icmp6_hdr(skb);
- struct ipv6hdr *ip, *ip0 = ipv6_hdr(skb);
- struct udphdr *udp = (struct udphdr *)(skb->data + udp_offset);
-
- _enter("%u,%u,%u", ip0->nexthdr, icmp->icmp6_type, icmp->icmp6_code);
-
- switch (icmp->icmp6_type) {
- case ICMPV6_DEST_UNREACH:
- *info = ntohl(icmp->icmp6_mtu);
- fallthrough;
- case ICMPV6_PKT_TOOBIG:
- case ICMPV6_TIME_EXCEED:
- case ICMPV6_PARAMPROB:
- ip = (struct ipv6hdr *)((void *)icmp + 8);
- break;
- default:
- return NULL;
- }
-
- memset(srx, 0, sizeof(*srx));
- srx->transport_type = local->srx.transport_type;
- srx->transport_len = local->srx.transport_len;
- srx->transport.family = local->srx.transport.family;
-
- /* Can we see an ICMP4 packet on an ICMP6 listening socket? and vice
- * versa?
- */
- switch (srx->transport.family) {
- case AF_INET:
- _net("Rx ICMP6 on v4 sock");
- srx->transport_len = sizeof(srx->transport.sin);
- srx->transport.family = AF_INET;
- srx->transport.sin.sin_port = udp->dest;
- memcpy(&srx->transport.sin.sin_addr,
- &ip->daddr.s6_addr32[3], sizeof(struct in_addr));
- break;
- case AF_INET6:
- _net("Rx ICMP6");
- srx->transport.sin.sin_port = udp->dest;
- memcpy(&srx->transport.sin6.sin6_addr, &ip->daddr,
- sizeof(struct in6_addr));
- break;
- default:
- WARN_ON_ONCE(1);
- return NULL;
- }
-
- _net("ICMP {%pISp}", &srx->transport);
- return rxrpc_lookup_peer_rcu(local, srx);
-}
-#endif /* CONFIG_AF_RXRPC_IPV6 */
-
-/*
- * Handle an error received on the local endpoint as a tunnel.
- */
-void rxrpc_encap_err_rcv(struct sock *sk, struct sk_buff *skb,
- unsigned int udp_offset)
-{
- struct sock_extended_err ee;
- struct sockaddr_rxrpc srx;
- struct rxrpc_local *local;
- struct rxrpc_peer *peer;
- unsigned int info = 0;
- int err;
- u8 version = ip_hdr(skb)->version;
- u8 type = icmp_hdr(skb)->type;
- u8 code = icmp_hdr(skb)->code;
-
- rcu_read_lock();
- local = rcu_dereference_sk_user_data(sk);
- if (unlikely(!local)) {
- rcu_read_unlock();
- return;
- }
-
- rxrpc_new_skb(skb, rxrpc_skb_received);
-
- switch (ip_hdr(skb)->version) {
- case IPVERSION:
- peer = rxrpc_lookup_peer_icmp_rcu(local, skb, udp_offset,
- &info, &srx);
- break;
-#ifdef CONFIG_AF_RXRPC_IPV6
- case 6:
- peer = rxrpc_lookup_peer_icmp6_rcu(local, skb, udp_offset,
- &info, &srx);
- break;
-#endif
- default:
- rcu_read_unlock();
- return;
- }
-
- if (peer && !rxrpc_get_peer_maybe(peer))
- peer = NULL;
- if (!peer) {
- rcu_read_unlock();
- return;
- }
-
- memset(&ee, 0, sizeof(ee));
-
- switch (version) {
- case IPVERSION:
- switch (type) {
- case ICMP_DEST_UNREACH:
- switch (code) {
- case ICMP_FRAG_NEEDED:
- rxrpc_adjust_mtu(peer, info);
- rcu_read_unlock();
- rxrpc_put_peer(peer);
- return;
- default:
- break;
- }
-
- err = EHOSTUNREACH;
- if (code <= NR_ICMP_UNREACH) {
- /* Might want to do something different with
- * non-fatal errors
- */
- //harderr = icmp_err_convert[code].fatal;
- err = icmp_err_convert[code].errno;
- }
- break;
-
- case ICMP_TIME_EXCEEDED:
- err = EHOSTUNREACH;
- break;
- default:
- err = EPROTO;
- break;
- }
-
- ee.ee_origin = SO_EE_ORIGIN_ICMP;
- ee.ee_type = type;
- ee.ee_code = code;
- ee.ee_errno = err;
- break;
-
-#ifdef CONFIG_AF_RXRPC_IPV6
- case 6:
- switch (type) {
- case ICMPV6_PKT_TOOBIG:
- rxrpc_adjust_mtu(peer, info);
- rcu_read_unlock();
- rxrpc_put_peer(peer);
- return;
- }
-
- icmpv6_err_convert(type, code, &err);
-
- if (err == EACCES)
- err = EHOSTUNREACH;
-
- ee.ee_origin = SO_EE_ORIGIN_ICMP6;
- ee.ee_type = type;
- ee.ee_code = code;
- ee.ee_errno = err;
- break;
-#endif
- }
-
- trace_rxrpc_rx_icmp(peer, &ee, &srx);
-
- rxrpc_distribute_error(peer, err, RXRPC_CALL_NETWORK_ERROR);
- rcu_read_unlock();
- rxrpc_put_peer(peer);
-}
-
-/*
* Find the peer associated with a local error.
*/
static struct rxrpc_peer *rxrpc_lookup_peer_local_rcu(struct rxrpc_local *local,
@@ -283,6 +38,9 @@ static struct rxrpc_peer *rxrpc_lookup_peer_local_rcu(struct rxrpc_local *local,
srx->transport_len = local->srx.transport_len;
srx->transport.family = local->srx.transport.family;
+ /* Can we see an ICMP4 packet on an ICMP6 listening socket? and vice
+ * versa?
+ */
switch (srx->transport.family) {
case AF_INET:
srx->transport_len = sizeof(srx->transport.sin);
@@ -412,20 +170,38 @@ void rxrpc_error_report(struct sock *sk)
}
rxrpc_new_skb(skb, rxrpc_skb_received);
serr = SKB_EXT_ERR(skb);
+ if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
+ _leave("UDP empty message");
+ rcu_read_unlock();
+ rxrpc_free_skb(skb, rxrpc_skb_freed);
+ return;
+ }
- if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL) {
- peer = rxrpc_lookup_peer_local_rcu(local, skb, &srx);
- if (peer && !rxrpc_get_peer_maybe(peer))
- peer = NULL;
- if (peer) {
- trace_rxrpc_rx_icmp(peer, &serr->ee, &srx);
- rxrpc_store_error(peer, serr);
- }
+ peer = rxrpc_lookup_peer_local_rcu(local, skb, &srx);
+ if (peer && !rxrpc_get_peer_maybe(peer))
+ peer = NULL;
+ if (!peer) {
+ rcu_read_unlock();
+ rxrpc_free_skb(skb, rxrpc_skb_freed);
+ _leave(" [no peer]");
+ return;
}
+ trace_rxrpc_rx_icmp(peer, &serr->ee, &srx);
+
+ if ((serr->ee.ee_origin == SO_EE_ORIGIN_ICMP &&
+ serr->ee.ee_type == ICMP_DEST_UNREACH &&
+ serr->ee.ee_code == ICMP_FRAG_NEEDED)) {
+ rxrpc_adjust_mtu(peer, serr->ee.ee_info);
+ goto out;
+ }
+
+ rxrpc_store_error(peer, serr);
+out:
rcu_read_unlock();
rxrpc_free_skb(skb, rxrpc_skb_freed);
rxrpc_put_peer(peer);
+
_leave("");
}
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 26d2ae9baaf2..041a51225c5f 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -227,12 +227,7 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
rxrpc_peer_init_rtt(peer);
- if (RXRPC_TX_SMSS > 2190)
- peer->cong_cwnd = 2;
- else if (RXRPC_TX_SMSS > 1095)
- peer->cong_cwnd = 3;
- else
- peer->cong_cwnd = 4;
+ peer->cong_ssthresh = RXRPC_TX_MAX_WINDOW;
trace_rxrpc_peer(peer->debug_id, rxrpc_peer_new, 1, here);
}
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
index 245418943e01..fae22a8b38d6 100644
--- a/net/rxrpc/proc.c
+++ b/net/rxrpc/proc.c
@@ -54,8 +54,9 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
struct rxrpc_call *call;
struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
unsigned long timeout = 0;
- rxrpc_seq_t tx_hard_ack, rx_hard_ack;
+ rxrpc_seq_t acks_hard_ack;
char lbuff[50], rbuff[50];
+ u64 wtmp;
if (v == &rxnet->calls) {
seq_puts(seq,
@@ -90,8 +91,8 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
timeout -= jiffies;
}
- tx_hard_ack = READ_ONCE(call->tx_hard_ack);
- rx_hard_ack = READ_ONCE(call->rx_hard_ack);
+ acks_hard_ack = READ_ONCE(call->acks_hard_ack);
+ wtmp = atomic64_read_acquire(&call->ackr_window);
seq_printf(seq,
"UDP %-47.47s %-47.47s %4x %08x %08x %s %3u"
" %-8.8s %08x %08x %08x %02x %08x %02x %08x %06lx\n",
@@ -105,8 +106,8 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
rxrpc_call_states[call->state],
call->abort_code,
call->debug_id,
- tx_hard_ack, READ_ONCE(call->tx_top) - tx_hard_ack,
- rx_hard_ack, READ_ONCE(call->rx_top) - rx_hard_ack,
+ acks_hard_ack, READ_ONCE(call->tx_top) - acks_hard_ack,
+ lower_32_bits(wtmp), upper_32_bits(wtmp) - lower_32_bits(wtmp),
call->rx_serial,
timeout);
@@ -216,7 +217,7 @@ static int rxrpc_peer_seq_show(struct seq_file *seq, void *v)
seq_puts(seq,
"Proto Local "
" Remote "
- " Use CW MTU LastUse RTT RTO\n"
+ " Use SST MTU LastUse RTT RTO\n"
);
return 0;
}
@@ -234,7 +235,7 @@ static int rxrpc_peer_seq_show(struct seq_file *seq, void *v)
lbuff,
rbuff,
refcount_read(&peer->ref),
- peer->cong_cwnd,
+ peer->cong_ssthresh,
peer->mtu,
now - peer->last_tx_at,
peer->srtt_us >> 3,
@@ -397,3 +398,98 @@ const struct seq_operations rxrpc_local_seq_ops = {
.stop = rxrpc_local_seq_stop,
.show = rxrpc_local_seq_show,
};
+
+/*
+ * Display stats in /proc/net/rxrpc/stats
+ */
+int rxrpc_stats_show(struct seq_file *seq, void *v)
+{
+ struct rxrpc_net *rxnet = rxrpc_net(seq_file_single_net(seq));
+
+ seq_printf(seq,
+ "Data : send=%u sendf=%u\n",
+ atomic_read(&rxnet->stat_tx_data_send),
+ atomic_read(&rxnet->stat_tx_data_send_frag));
+ seq_printf(seq,
+ "Data-Tx : nr=%u retrans=%u\n",
+ atomic_read(&rxnet->stat_tx_data),
+ atomic_read(&rxnet->stat_tx_data_retrans));
+ seq_printf(seq,
+ "Data-Rx : nr=%u reqack=%u jumbo=%u\n",
+ atomic_read(&rxnet->stat_rx_data),
+ atomic_read(&rxnet->stat_rx_data_reqack),
+ atomic_read(&rxnet->stat_rx_data_jumbo));
+ seq_printf(seq,
+ "Ack : fill=%u send=%u skip=%u\n",
+ atomic_read(&rxnet->stat_tx_ack_fill),
+ atomic_read(&rxnet->stat_tx_ack_send),
+ atomic_read(&rxnet->stat_tx_ack_skip));
+ seq_printf(seq,
+ "Ack-Tx : req=%u dup=%u oos=%u exw=%u nos=%u png=%u prs=%u dly=%u idl=%u\n",
+ atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_REQUESTED]),
+ atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_DUPLICATE]),
+ atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_OUT_OF_SEQUENCE]),
+ atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_EXCEEDS_WINDOW]),
+ atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_NOSPACE]),
+ atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_PING]),
+ atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_PING_RESPONSE]),
+ atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_DELAY]),
+ atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_IDLE]));
+ seq_printf(seq,
+ "Ack-Rx : req=%u dup=%u oos=%u exw=%u nos=%u png=%u prs=%u dly=%u idl=%u\n",
+ atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_REQUESTED]),
+ atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_DUPLICATE]),
+ atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_OUT_OF_SEQUENCE]),
+ atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_EXCEEDS_WINDOW]),
+ atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_NOSPACE]),
+ atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_PING]),
+ atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_PING_RESPONSE]),
+ atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_DELAY]),
+ atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_IDLE]));
+ seq_printf(seq,
+ "Why-Req-A: acklost=%u already=%u mrtt=%u ortt=%u\n",
+ atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_ack_lost]),
+ atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_already_on]),
+ atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_more_rtt]),
+ atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_old_rtt]));
+ seq_printf(seq,
+ "Why-Req-A: nolast=%u retx=%u slows=%u smtxw=%u\n",
+ atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_no_srv_last]),
+ atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_retrans]),
+ atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_slow_start]),
+ atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_small_txwin]));
+ seq_printf(seq,
+ "Buffers : txb=%u rxb=%u\n",
+ atomic_read(&rxrpc_nr_txbuf),
+ atomic_read(&rxrpc_n_rx_skbs));
+ return 0;
+}
+
+/*
+ * Clear stats if /proc/net/rxrpc/stats is written to.
+ */
+int rxrpc_stats_clear(struct file *file, char *buf, size_t size)
+{
+ struct seq_file *m = file->private_data;
+ struct rxrpc_net *rxnet = rxrpc_net(seq_file_single_net(m));
+
+ if (size > 1 || (size == 1 && buf[0] != '\n'))
+ return -EINVAL;
+
+ atomic_set(&rxnet->stat_tx_data, 0);
+ atomic_set(&rxnet->stat_tx_data_retrans, 0);
+ atomic_set(&rxnet->stat_tx_data_send, 0);
+ atomic_set(&rxnet->stat_tx_data_send_frag, 0);
+ atomic_set(&rxnet->stat_rx_data, 0);
+ atomic_set(&rxnet->stat_rx_data_reqack, 0);
+ atomic_set(&rxnet->stat_rx_data_jumbo, 0);
+
+ atomic_set(&rxnet->stat_tx_ack_fill, 0);
+ atomic_set(&rxnet->stat_tx_ack_send, 0);
+ atomic_set(&rxnet->stat_tx_ack_skip, 0);
+ memset(&rxnet->stat_tx_acks, 0, sizeof(rxnet->stat_tx_acks));
+ memset(&rxnet->stat_rx_acks, 0, sizeof(rxnet->stat_rx_acks));
+
+ memset(&rxnet->stat_why_req_ack, 0, sizeof(rxnet->stat_why_req_ack));
+ return size;
+}
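
rxrpc_stats_clear() above accepts only a zero-length write or a lone newline, so resetting the counters from userspace needs nothing more than the sketch below. The proc path is taken from the comments above; this snippet is an illustration, not part of the patch:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/net/rxrpc/stats", "w");

	if (!f)
		return 1;
	fputc('\n', f);			/* anything longer gets -EINVAL */
	return fclose(f) ? 1 : 0;
}
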
diff --git a/net/rxrpc/protocol.h b/net/rxrpc/protocol.h
index d2cf8e1d218f..6760cb99c6d6 100644
--- a/net/rxrpc/protocol.h
+++ b/net/rxrpc/protocol.h
@@ -84,7 +84,7 @@ struct rxrpc_jumbo_header {
__be16 _rsvd; /* reserved */
__be16 cksum; /* kerberos security checksum */
};
-};
+} __packed;
#define RXRPC_JUMBO_DATALEN 1412 /* non-terminal jumbo packet data length */
#define RXRPC_JUMBO_SUBPKTLEN (RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header))
@@ -132,13 +132,6 @@ struct rxrpc_ackpacket {
} __packed;
-/* Some ACKs refer to specific packets and some are general and can be updated. */
-#define RXRPC_ACK_UPDATEABLE ((1 << RXRPC_ACK_REQUESTED) | \
- (1 << RXRPC_ACK_PING_RESPONSE) | \
- (1 << RXRPC_ACK_DELAY) | \
- (1 << RXRPC_ACK_IDLE))
-
-
/*
* ACK packets can have a further piece of information tagged on the end
*/
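
Marking struct rxrpc_jumbo_header __packed matters because each jumbo header sits flush against its 1412-byte data segment on the wire; any compiler padding would shift every subsequent subpacket. A standalone sketch of the offset arithmetic implied by RXRPC_JUMBO_SUBPKTLEN and by the (now removed) locate-data code in recvmsg.c; the 4-byte header size is an assumption based on the fields shown:

#include <stddef.h>

#define RXRPC_JUMBO_DATALEN	1412	/* non-terminal jumbo segment length */
#define RXRPC_JUMBO_HDRLEN	4	/* assumed sizeof(struct rxrpc_jumbo_header) */
#define RXRPC_JUMBO_SUBPKTLEN	(RXRPC_JUMBO_DATALEN + RXRPC_JUMBO_HDRLEN)

/* Byte offset of jumbo subpacket n, counted from the start of the wire header. */
static size_t jumbo_subpkt_offset(size_t wire_hdr_len, unsigned int n)
{
	return wire_hdr_len + (size_t)n * RXRPC_JUMBO_SUBPKTLEN;
}
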
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 7e39c262fd79..efb85f983657 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -173,8 +173,9 @@ static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg)
break;
}
- trace_rxrpc_recvmsg(call, rxrpc_recvmsg_terminal, call->rx_hard_ack,
- call->rx_pkt_offset, call->rx_pkt_len, ret);
+ trace_rxrpc_recvdata(call, rxrpc_recvmsg_terminal,
+ lower_32_bits(atomic64_read(&call->ackr_window)) - 1,
+ call->rx_pkt_offset, call->rx_pkt_len, ret);
return ret;
}
@@ -183,16 +184,14 @@ static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg)
*/
static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
{
+ rxrpc_seq_t whigh = READ_ONCE(call->rx_highest_seq);
+
_enter("%d,%s", call->debug_id, rxrpc_call_states[call->state]);
- trace_rxrpc_receive(call, rxrpc_receive_end, 0, call->rx_top);
- ASSERTCMP(call->rx_hard_ack, ==, call->rx_top);
+ trace_rxrpc_receive(call, rxrpc_receive_end, 0, whigh);
- if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) {
- rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, serial, false, true,
- rxrpc_propose_ack_terminal_ack);
- //rxrpc_send_ack_packet(call, false, NULL);
- }
+ if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY)
+ rxrpc_propose_delay_ACK(call, serial, rxrpc_propose_ack_terminal_ack);
write_lock_bh(&call->state_lock);
@@ -203,12 +202,11 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
break;
case RXRPC_CALL_SERVER_RECV_REQUEST:
- call->tx_phase = true;
call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
write_unlock_bh(&call->state_lock);
- rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial, false, true,
- rxrpc_propose_ack_processing_op);
+ rxrpc_propose_delay_ACK(call, serial,
+ rxrpc_propose_ack_processing_op);
break;
default:
write_unlock_bh(&call->state_lock);
@@ -224,126 +222,66 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
struct rxrpc_skb_priv *sp;
struct sk_buff *skb;
rxrpc_serial_t serial;
- rxrpc_seq_t hard_ack, top;
- bool last = false;
- u8 subpacket;
- int ix;
+ rxrpc_seq_t old_consumed = call->rx_consumed, tseq;
+ bool last;
+ int acked;
_enter("%d", call->debug_id);
- hard_ack = call->rx_hard_ack;
- top = smp_load_acquire(&call->rx_top);
- ASSERT(before(hard_ack, top));
-
- hard_ack++;
- ix = hard_ack & RXRPC_RXTX_BUFF_MASK;
- skb = call->rxtx_buffer[ix];
+further_rotation:
+ skb = skb_dequeue(&call->recvmsg_queue);
rxrpc_see_skb(skb, rxrpc_skb_rotated);
- sp = rxrpc_skb(skb);
-
- subpacket = call->rxtx_annotations[ix] & RXRPC_RX_ANNO_SUBPACKET;
- serial = sp->hdr.serial + subpacket;
- if (subpacket == sp->nr_subpackets - 1 &&
- sp->rx_flags & RXRPC_SKB_INCL_LAST)
- last = true;
+ sp = rxrpc_skb(skb);
+ tseq = sp->hdr.seq;
+ serial = sp->hdr.serial;
+ last = sp->hdr.flags & RXRPC_LAST_PACKET;
- call->rxtx_buffer[ix] = NULL;
- call->rxtx_annotations[ix] = 0;
/* Barrier against rxrpc_input_data(). */
- smp_store_release(&call->rx_hard_ack, hard_ack);
+ if (after(tseq, call->rx_consumed))
+ smp_store_release(&call->rx_consumed, tseq);
rxrpc_free_skb(skb, rxrpc_skb_freed);
- trace_rxrpc_receive(call, rxrpc_receive_rotate, serial, hard_ack);
+ trace_rxrpc_receive(call, last ? rxrpc_receive_rotate_last : rxrpc_receive_rotate,
+ serial, call->rx_consumed);
if (last) {
rxrpc_end_rx_phase(call, serial);
- } else {
- /* Check to see if there's an ACK that needs sending. */
- if (atomic_inc_return(&call->ackr_nr_consumed) > 2)
- rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, serial,
- true, false,
- rxrpc_propose_ack_rotate_rx);
- if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY)
- rxrpc_send_ack_packet(call, false, NULL);
+ return;
}
-}
-
-/*
- * Decrypt and verify a (sub)packet. The packet's length may be changed due to
- * padding, but if this is the case, the packet length will be resident in the
- * socket buffer. Note that we can't modify the master skb info as the skb may
- * be the home to multiple subpackets.
- */
-static int rxrpc_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
- u8 annotation,
- unsigned int offset, unsigned int len)
-{
- struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- rxrpc_seq_t seq = sp->hdr.seq;
- u16 cksum = sp->hdr.cksum;
- u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET;
- _enter("");
-
- /* For all but the head jumbo subpacket, the security checksum is in a
- * jumbo header immediately prior to the data.
+ /* The next packet on the queue might entirely overlap with the one we
+ * just consumed; if so, rotate that away also.
*/
- if (subpacket > 0) {
- __be16 tmp;
- if (skb_copy_bits(skb, offset - 2, &tmp, 2) < 0)
- BUG();
- cksum = ntohs(tmp);
- seq += subpacket;
+ skb = skb_peek(&call->recvmsg_queue);
+ if (skb) {
+ sp = rxrpc_skb(skb);
+ if (sp->hdr.seq != call->rx_consumed &&
+ after_eq(call->rx_consumed, sp->hdr.seq))
+ goto further_rotation;
}
- return call->security->verify_packet(call, skb, offset, len,
- seq, cksum);
+ /* Check to see if there's an ACK that needs sending. */
+ acked = atomic_add_return(call->rx_consumed - old_consumed,
+ &call->ackr_nr_consumed);
+ if (acked > 2 &&
+ !test_and_set_bit(RXRPC_CALL_IDLE_ACK_PENDING, &call->flags)) {
+ rxrpc_send_ACK(call, RXRPC_ACK_IDLE, serial,
+ rxrpc_propose_ack_rotate_rx);
+ rxrpc_transmit_ack_packets(call->peer->local);
+ }
}
/*
- * Locate the data within a packet. This is complicated by:
- *
- * (1) An skb may contain a jumbo packet - so we have to find the appropriate
- * subpacket.
- *
- * (2) The (sub)packets may be encrypted and, if so, the encrypted portion
- * contains an extra header which includes the true length of the data,
- * excluding any encrypted padding.
+ * Decrypt and verify a DATA packet.
*/
-static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
- u8 *_annotation,
- unsigned int *_offset, unsigned int *_len,
- bool *_last)
+static int rxrpc_verify_data(struct rxrpc_call *call, struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- unsigned int offset = sizeof(struct rxrpc_wire_header);
- unsigned int len;
- bool last = false;
- int ret;
- u8 annotation = *_annotation;
- u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET;
-
- /* Locate the subpacket */
- offset += subpacket * RXRPC_JUMBO_SUBPKTLEN;
- len = skb->len - offset;
- if (subpacket < sp->nr_subpackets - 1)
- len = RXRPC_JUMBO_DATALEN;
- else if (sp->rx_flags & RXRPC_SKB_INCL_LAST)
- last = true;
-
- if (!(annotation & RXRPC_RX_ANNO_VERIFIED)) {
- ret = rxrpc_verify_packet(call, skb, annotation, offset, len);
- if (ret < 0)
- return ret;
- *_annotation |= RXRPC_RX_ANNO_VERIFIED;
- }
- *_offset = offset;
- *_len = len;
- *_last = last;
- call->security->locate_data(call, skb, _offset, _len);
- return 0;
+ if (sp->flags & RXRPC_RX_VERIFIED)
+ return 0;
+ return call->security->verify_packet(call, skb);
}
/*
@@ -357,69 +295,55 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
{
struct rxrpc_skb_priv *sp;
struct sk_buff *skb;
- rxrpc_serial_t serial;
- rxrpc_seq_t hard_ack, top, seq;
+ rxrpc_seq_t seq = 0;
size_t remain;
- bool rx_pkt_last;
unsigned int rx_pkt_offset, rx_pkt_len;
- int ix, copy, ret = -EAGAIN, ret2;
-
- if (test_and_clear_bit(RXRPC_CALL_RX_UNDERRUN, &call->flags) &&
- call->ackr_reason)
- rxrpc_send_ack_packet(call, false, NULL);
+ int copy, ret = -EAGAIN, ret2;
rx_pkt_offset = call->rx_pkt_offset;
rx_pkt_len = call->rx_pkt_len;
- rx_pkt_last = call->rx_pkt_last;
if (call->state >= RXRPC_CALL_SERVER_ACK_REQUEST) {
- seq = call->rx_hard_ack;
+ seq = lower_32_bits(atomic64_read(&call->ackr_window)) - 1;
ret = 1;
goto done;
}
- /* Barriers against rxrpc_input_data(). */
- hard_ack = call->rx_hard_ack;
- seq = hard_ack + 1;
-
- while (top = smp_load_acquire(&call->rx_top),
- before_eq(seq, top)
- ) {
- ix = seq & RXRPC_RXTX_BUFF_MASK;
- skb = call->rxtx_buffer[ix];
- if (!skb) {
- trace_rxrpc_recvmsg(call, rxrpc_recvmsg_hole, seq,
- rx_pkt_offset, rx_pkt_len, 0);
- break;
- }
- smp_rmb();
+ /* No one else can be removing stuff from the queue, so we shouldn't
+ * need the Rx lock to walk it.
+ */
+ skb = skb_peek(&call->recvmsg_queue);
+ while (skb) {
rxrpc_see_skb(skb, rxrpc_skb_seen);
sp = rxrpc_skb(skb);
+ seq = sp->hdr.seq;
- if (!(flags & MSG_PEEK)) {
- serial = sp->hdr.serial;
- serial += call->rxtx_annotations[ix] & RXRPC_RX_ANNO_SUBPACKET;
- trace_rxrpc_receive(call, rxrpc_receive_front,
- serial, seq);
+ if (after_eq(call->rx_consumed, seq)) {
+ kdebug("obsolete %x %x", call->rx_consumed, seq);
+ goto skip_obsolete;
}
+ if (!(flags & MSG_PEEK))
+ trace_rxrpc_receive(call, rxrpc_receive_front,
+ sp->hdr.serial, seq);
+
if (msg)
sock_recv_timestamp(msg, sock->sk, skb);
if (rx_pkt_offset == 0) {
- ret2 = rxrpc_locate_data(call, skb,
- &call->rxtx_annotations[ix],
- &rx_pkt_offset, &rx_pkt_len,
- &rx_pkt_last);
- trace_rxrpc_recvmsg(call, rxrpc_recvmsg_next, seq,
- rx_pkt_offset, rx_pkt_len, ret2);
+ ret2 = rxrpc_verify_data(call, skb);
+ rx_pkt_offset = sp->offset;
+ rx_pkt_len = sp->len;
+ trace_rxrpc_recvdata(call, rxrpc_recvmsg_next, seq,
+ rx_pkt_offset, rx_pkt_len, ret2);
if (ret2 < 0) {
ret = ret2;
goto out;
}
+ rxrpc_transmit_ack_packets(call->peer->local);
} else {
- trace_rxrpc_recvmsg(call, rxrpc_recvmsg_cont, seq,
- rx_pkt_offset, rx_pkt_len, 0);
+ trace_rxrpc_recvdata(call, rxrpc_recvmsg_cont, seq,
+ rx_pkt_offset, rx_pkt_len, 0);
}
/* We have to handle short, empty and used-up DATA packets. */
@@ -442,37 +366,34 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
}
if (rx_pkt_len > 0) {
- trace_rxrpc_recvmsg(call, rxrpc_recvmsg_full, seq,
- rx_pkt_offset, rx_pkt_len, 0);
+ trace_rxrpc_recvdata(call, rxrpc_recvmsg_full, seq,
+ rx_pkt_offset, rx_pkt_len, 0);
ASSERTCMP(*_offset, ==, len);
ret = 0;
break;
}
+ skip_obsolete:
/* The whole packet has been transferred. */
- if (!(flags & MSG_PEEK))
- rxrpc_rotate_rx_window(call);
+ if (sp->hdr.flags & RXRPC_LAST_PACKET)
+ ret = 1;
rx_pkt_offset = 0;
rx_pkt_len = 0;
- if (rx_pkt_last) {
- ASSERTCMP(seq, ==, READ_ONCE(call->rx_top));
- ret = 1;
- goto out;
- }
+ skb = skb_peek_next(skb, &call->recvmsg_queue);
- seq++;
+ if (!(flags & MSG_PEEK))
+ rxrpc_rotate_rx_window(call);
}
out:
if (!(flags & MSG_PEEK)) {
call->rx_pkt_offset = rx_pkt_offset;
call->rx_pkt_len = rx_pkt_len;
- call->rx_pkt_last = rx_pkt_last;
}
done:
- trace_rxrpc_recvmsg(call, rxrpc_recvmsg_data_return, seq,
- rx_pkt_offset, rx_pkt_len, ret);
+ trace_rxrpc_recvdata(call, rxrpc_recvmsg_data_return, seq,
+ rx_pkt_offset, rx_pkt_len, ret);
if (ret == -EAGAIN)
set_bit(RXRPC_CALL_RX_UNDERRUN, &call->flags);
return ret;
@@ -495,7 +416,7 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
DEFINE_WAIT(wait);
- trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_enter, 0, 0, 0, 0);
+ trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_enter, 0);
if (flags & (MSG_OOB | MSG_TRUNC))
return -EOPNOTSUPP;
@@ -532,8 +453,7 @@ try_again:
if (list_empty(&rx->recvmsg_q)) {
if (signal_pending(current))
goto wait_interrupted;
- trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_wait,
- 0, 0, 0, 0);
+ trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_wait, 0);
timeo = schedule_timeout(timeo);
}
finish_wait(sk_sleep(&rx->sk), &wait);
@@ -552,7 +472,7 @@ try_again:
rxrpc_get_call(call, rxrpc_call_got);
write_unlock_bh(&rx->recvmsg_lock);
- trace_rxrpc_recvmsg(call, rxrpc_recvmsg_dequeue, 0, 0, 0, 0);
+ trace_rxrpc_recvmsg(call, rxrpc_recvmsg_dequeue, 0);
/* We're going to drop the socket lock, so we need to lock the call
* against interference by sendmsg.
@@ -605,8 +525,8 @@ try_again:
if (ret == -EAGAIN)
ret = 0;
- if (after(call->rx_top, call->rx_hard_ack) &&
- call->rxtx_buffer[(call->rx_hard_ack + 1) & RXRPC_RXTX_BUFF_MASK])
+ rxrpc_transmit_ack_packets(call->peer->local);
+ if (!skb_queue_empty(&call->recvmsg_queue))
rxrpc_notify_socket(call);
break;
default:
@@ -636,7 +556,7 @@ try_again:
error_unlock_call:
mutex_unlock(&call->user_mutex);
rxrpc_put_call(call, rxrpc_call_put);
- trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
+ trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, ret);
return ret;
error_requeue_call:
@@ -644,14 +564,14 @@ error_requeue_call:
write_lock_bh(&rx->recvmsg_lock);
list_add(&call->recvmsg_link, &rx->recvmsg_q);
write_unlock_bh(&rx->recvmsg_lock);
- trace_rxrpc_recvmsg(call, rxrpc_recvmsg_requeue, 0, 0, 0, 0);
+ trace_rxrpc_recvmsg(call, rxrpc_recvmsg_requeue, 0);
} else {
rxrpc_put_call(call, rxrpc_call_put);
}
error_no_call:
release_sock(&rx->sk);
error_trace:
- trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
+ trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, ret);
return ret;
wait_interrupted:
@@ -735,17 +655,7 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
read_phase_complete:
ret = 1;
out:
- switch (call->ackr_reason) {
- case RXRPC_ACK_IDLE:
- break;
- case RXRPC_ACK_DELAY:
- if (ret != -EAGAIN)
- break;
- fallthrough;
- default:
- rxrpc_send_ack_packet(call, false, NULL);
- }
-
+ rxrpc_transmit_ack_packets(call->peer->local);
if (_service)
*_service = call->service_id;
mutex_unlock(&call->user_mutex);
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 78fa0524156f..2706e59bf992 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -233,16 +233,8 @@ static int rxkad_prime_packet_security(struct rxrpc_connection *conn,
static struct skcipher_request *rxkad_get_call_crypto(struct rxrpc_call *call)
{
struct crypto_skcipher *tfm = &call->conn->rxkad.cipher->base;
- struct skcipher_request *cipher_req = call->cipher_req;
- if (!cipher_req) {
- cipher_req = skcipher_request_alloc(tfm, GFP_NOFS);
- if (!cipher_req)
- return NULL;
- call->cipher_req = cipher_req;
- }
-
- return cipher_req;
+ return skcipher_request_alloc(tfm, GFP_NOFS);
}
/*
@@ -250,20 +242,16 @@ static struct skcipher_request *rxkad_get_call_crypto(struct rxrpc_call *call)
*/
static void rxkad_free_call_crypto(struct rxrpc_call *call)
{
- if (call->cipher_req)
- skcipher_request_free(call->cipher_req);
- call->cipher_req = NULL;
}
/*
* partially encrypt a packet (level 1 security)
*/
static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
- struct sk_buff *skb, u32 data_size,
+ struct rxrpc_txbuf *txb,
struct skcipher_request *req)
{
- struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- struct rxkad_level1_hdr hdr;
+ struct rxkad_level1_hdr *hdr = (void *)txb->data;
struct rxrpc_crypt iv;
struct scatterlist sg;
size_t pad;
@@ -271,22 +259,22 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
_enter("");
- check = sp->hdr.seq ^ call->call_id;
- data_size |= (u32)check << 16;
+ check = txb->seq ^ ntohl(txb->wire.callNumber);
+ hdr->data_size = htonl((u32)check << 16 | txb->len);
- hdr.data_size = htonl(data_size);
- memcpy(skb->head, &hdr, sizeof(hdr));
-
- pad = sizeof(struct rxkad_level1_hdr) + data_size;
+ txb->len += sizeof(struct rxkad_level1_hdr);
+ pad = txb->len;
pad = RXKAD_ALIGN - pad;
pad &= RXKAD_ALIGN - 1;
- if (pad)
- skb_put_zero(skb, pad);
+ if (pad) {
+ memset(txb->data + txb->offset, 0, pad);
+ txb->len += pad;
+ }
/* start the encryption afresh */
memset(&iv, 0, sizeof(iv));
- sg_init_one(&sg, skb->head, 8);
+ sg_init_one(&sg, txb->data, 8);
skcipher_request_set_sync_tfm(req, call->conn->rxkad.cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x);
@@ -301,87 +289,63 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
* wholly encrypt a packet (level 2 security)
*/
static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
- struct sk_buff *skb,
- u32 data_size,
+ struct rxrpc_txbuf *txb,
struct skcipher_request *req)
{
const struct rxrpc_key_token *token;
- struct rxkad_level2_hdr rxkhdr;
- struct rxrpc_skb_priv *sp;
+ struct rxkad_level2_hdr *rxkhdr = (void *)txb->data;
struct rxrpc_crypt iv;
- struct scatterlist sg[16];
- unsigned int len;
+ struct scatterlist sg;
size_t pad;
u16 check;
- int err;
-
- sp = rxrpc_skb(skb);
+ int ret;
_enter("");
- check = sp->hdr.seq ^ call->call_id;
+ check = txb->seq ^ ntohl(txb->wire.callNumber);
- rxkhdr.data_size = htonl(data_size | (u32)check << 16);
- rxkhdr.checksum = 0;
- memcpy(skb->head, &rxkhdr, sizeof(rxkhdr));
+ rxkhdr->data_size = htonl(txb->len | (u32)check << 16);
+ rxkhdr->checksum = 0;
- pad = sizeof(struct rxkad_level2_hdr) + data_size;
+ txb->len += sizeof(struct rxkad_level2_hdr);
+ pad = txb->len;
pad = RXKAD_ALIGN - pad;
pad &= RXKAD_ALIGN - 1;
- if (pad)
- skb_put_zero(skb, pad);
+ if (pad) {
+ memset(txb->data + txb->offset, 0, pad);
+ txb->len += pad;
+ }
/* encrypt from the session key */
token = call->conn->params.key->payload.data[0];
memcpy(&iv, token->kad->session_key, sizeof(iv));
- sg_init_one(&sg[0], skb->head, sizeof(rxkhdr));
+ sg_init_one(&sg, txb->data, txb->len);
skcipher_request_set_sync_tfm(req, call->conn->rxkad.cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
- skcipher_request_set_crypt(req, &sg[0], &sg[0], sizeof(rxkhdr), iv.x);
- crypto_skcipher_encrypt(req);
-
- /* we want to encrypt the skbuff in-place */
- err = -EMSGSIZE;
- if (skb_shinfo(skb)->nr_frags > 16)
- goto out;
-
- len = round_up(data_size, RXKAD_ALIGN);
-
- sg_init_table(sg, ARRAY_SIZE(sg));
- err = skb_to_sgvec(skb, sg, 8, len);
- if (unlikely(err < 0))
- goto out;
- skcipher_request_set_crypt(req, sg, sg, len, iv.x);
- crypto_skcipher_encrypt(req);
-
- _leave(" = 0");
- err = 0;
-
-out:
+ skcipher_request_set_crypt(req, &sg, &sg, txb->len, iv.x);
+ ret = crypto_skcipher_encrypt(req);
skcipher_request_zero(req);
- return err;
+ return ret;
}
/*
* checksum an RxRPC packet header
*/
-static int rxkad_secure_packet(struct rxrpc_call *call,
- struct sk_buff *skb,
- size_t data_size)
+static int rxkad_secure_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
{
- struct rxrpc_skb_priv *sp;
struct skcipher_request *req;
struct rxrpc_crypt iv;
struct scatterlist sg;
+ union {
+ __be32 buf[2];
+ } crypto __aligned(8);
u32 x, y;
int ret;
- sp = rxrpc_skb(skb);
-
- _enter("{%d{%x}},{#%u},%zu,",
+ _enter("{%d{%x}},{#%u},%u,",
call->debug_id, key_serial(call->conn->params.key),
- sp->hdr.seq, data_size);
+ txb->seq, txb->len);
if (!call->conn->rxkad.cipher)
return 0;
@@ -398,39 +362,40 @@ static int rxkad_secure_packet(struct rxrpc_call *call,
memcpy(&iv, call->conn->rxkad.csum_iv.x, sizeof(iv));
/* calculate the security checksum */
- x = (call->cid & RXRPC_CHANNELMASK) << (32 - RXRPC_CIDSHIFT);
- x |= sp->hdr.seq & 0x3fffffff;
- call->crypto_buf[0] = htonl(call->call_id);
- call->crypto_buf[1] = htonl(x);
+ x = (ntohl(txb->wire.cid) & RXRPC_CHANNELMASK) << (32 - RXRPC_CIDSHIFT);
+ x |= txb->seq & 0x3fffffff;
+ crypto.buf[0] = txb->wire.callNumber;
+ crypto.buf[1] = htonl(x);
- sg_init_one(&sg, call->crypto_buf, 8);
+ sg_init_one(&sg, crypto.buf, 8);
skcipher_request_set_sync_tfm(req, call->conn->rxkad.cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x);
crypto_skcipher_encrypt(req);
skcipher_request_zero(req);
- y = ntohl(call->crypto_buf[1]);
+ y = ntohl(crypto.buf[1]);
y = (y >> 16) & 0xffff;
if (y == 0)
y = 1; /* zero checksums are not permitted */
- sp->hdr.cksum = y;
+ txb->wire.cksum = htons(y);
switch (call->conn->params.security_level) {
case RXRPC_SECURITY_PLAIN:
ret = 0;
break;
case RXRPC_SECURITY_AUTH:
- ret = rxkad_secure_packet_auth(call, skb, data_size, req);
+ ret = rxkad_secure_packet_auth(call, txb, req);
break;
case RXRPC_SECURITY_ENCRYPT:
- ret = rxkad_secure_packet_encrypt(call, skb, data_size, req);
+ ret = rxkad_secure_packet_encrypt(call, txb, req);
break;
default:
ret = -EPERM;
break;
}
+ skcipher_request_free(req);
_leave(" = %d [set %x]", ret, y);
return ret;
}
@@ -439,11 +404,11 @@ static int rxkad_secure_packet(struct rxrpc_call *call,
* decrypt partial encryption on a packet (level 1 security)
*/
static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
- unsigned int offset, unsigned int len,
rxrpc_seq_t seq,
struct skcipher_request *req)
{
struct rxkad_level1_hdr sechdr;
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct rxrpc_crypt iv;
struct scatterlist sg[16];
bool aborted;
@@ -453,9 +418,9 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
_enter("");
- if (len < 8) {
+ if (sp->len < 8) {
aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_hdr", "V1H",
- RXKADSEALEDINCON);
+ RXKADSEALEDINCON);
goto protocol_error;
}
@@ -463,7 +428,7 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
* directly into the target buffer.
*/
sg_init_table(sg, ARRAY_SIZE(sg));
- ret = skb_to_sgvec(skb, sg, offset, 8);
+ ret = skb_to_sgvec(skb, sg, sp->offset, 8);
if (unlikely(ret < 0))
return ret;
@@ -477,12 +442,13 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
skcipher_request_zero(req);
/* Extract the decrypted packet length */
- if (skb_copy_bits(skb, offset, &sechdr, sizeof(sechdr)) < 0) {
+ if (skb_copy_bits(skb, sp->offset, &sechdr, sizeof(sechdr)) < 0) {
aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_len", "XV1",
RXKADDATALEN);
goto protocol_error;
}
- len -= sizeof(sechdr);
+ sp->offset += sizeof(sechdr);
+ sp->len -= sizeof(sechdr);
buf = ntohl(sechdr.data_size);
data_size = buf & 0xffff;
@@ -496,11 +462,12 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
goto protocol_error;
}
- if (data_size > len) {
+ if (data_size > sp->len) {
aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_datalen", "V1L",
RXKADDATALEN);
goto protocol_error;
}
+ sp->len = data_size;
_leave(" = 0 [dlen=%x]", data_size);
return 0;
@@ -515,12 +482,12 @@ protocol_error:
* wholly decrypt a packet (level 2 security)
*/
static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
- unsigned int offset, unsigned int len,
rxrpc_seq_t seq,
struct skcipher_request *req)
{
const struct rxrpc_key_token *token;
struct rxkad_level2_hdr sechdr;
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct rxrpc_crypt iv;
struct scatterlist _sg[4], *sg;
bool aborted;
@@ -528,9 +495,9 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
u16 check;
int nsg, ret;
- _enter(",{%d}", skb->len);
+ _enter(",{%d}", sp->len);
- if (len < 8) {
+ if (sp->len < 8) {
aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_hdr", "V2H",
RXKADSEALEDINCON);
goto protocol_error;
@@ -550,7 +517,7 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
}
sg_init_table(sg, nsg);
- ret = skb_to_sgvec(skb, sg, offset, len);
+ ret = skb_to_sgvec(skb, sg, sp->offset, sp->len);
if (unlikely(ret < 0)) {
if (sg != _sg)
kfree(sg);
@@ -563,19 +530,20 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
skcipher_request_set_sync_tfm(req, call->conn->rxkad.cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
- skcipher_request_set_crypt(req, sg, sg, len, iv.x);
+ skcipher_request_set_crypt(req, sg, sg, sp->len, iv.x);
crypto_skcipher_decrypt(req);
skcipher_request_zero(req);
if (sg != _sg)
kfree(sg);
/* Extract the decrypted packet length */
- if (skb_copy_bits(skb, offset, &sechdr, sizeof(sechdr)) < 0) {
+ if (skb_copy_bits(skb, sp->offset, &sechdr, sizeof(sechdr)) < 0) {
aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_len", "XV2",
RXKADDATALEN);
goto protocol_error;
}
- len -= sizeof(sechdr);
+ sp->offset += sizeof(sechdr);
+ sp->len -= sizeof(sechdr);
buf = ntohl(sechdr.data_size);
data_size = buf & 0xffff;
@@ -589,12 +557,13 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
goto protocol_error;
}
- if (data_size > len) {
+ if (data_size > sp->len) {
aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_datalen", "V2L",
RXKADDATALEN);
goto protocol_error;
}
+ sp->len = data_size;
_leave(" = 0 [dlen=%x]", data_size);
return 0;
@@ -609,17 +578,20 @@ nomem:
}
/*
- * Verify the security on a received packet or subpacket (if part of a
- * jumbo packet).
+ * Verify the security on a received packet and the subpackets therein.
*/
-static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
- unsigned int offset, unsigned int len,
- rxrpc_seq_t seq, u16 expected_cksum)
+static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb)
{
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct skcipher_request *req;
struct rxrpc_crypt iv;
struct scatterlist sg;
+ union {
+ __be32 buf[2];
+ } crypto __aligned(8);
+ rxrpc_seq_t seq = sp->hdr.seq;
bool aborted;
+ int ret;
u16 cksum;
u32 x, y;
@@ -639,22 +611,22 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
/* validate the security checksum */
x = (call->cid & RXRPC_CHANNELMASK) << (32 - RXRPC_CIDSHIFT);
x |= seq & 0x3fffffff;
- call->crypto_buf[0] = htonl(call->call_id);
- call->crypto_buf[1] = htonl(x);
+ crypto.buf[0] = htonl(call->call_id);
+ crypto.buf[1] = htonl(x);
- sg_init_one(&sg, call->crypto_buf, 8);
+ sg_init_one(&sg, crypto.buf, 8);
skcipher_request_set_sync_tfm(req, call->conn->rxkad.cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x);
crypto_skcipher_encrypt(req);
skcipher_request_zero(req);
- y = ntohl(call->crypto_buf[1]);
+ y = ntohl(crypto.buf[1]);
cksum = (y >> 16) & 0xffff;
if (cksum == 0)
cksum = 1; /* zero checksums are not permitted */
- if (cksum != expected_cksum) {
+ if (cksum != sp->hdr.cksum) {
aborted = rxrpc_abort_eproto(call, skb, "rxkad_csum", "VCK",
RXKADSEALEDINCON);
goto protocol_error;
@@ -662,15 +634,22 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
switch (call->conn->params.security_level) {
case RXRPC_SECURITY_PLAIN:
- return 0;
+ ret = 0;
+ break;
case RXRPC_SECURITY_AUTH:
- return rxkad_verify_packet_1(call, skb, offset, len, seq, req);
+ ret = rxkad_verify_packet_1(call, skb, seq, req);
+ break;
case RXRPC_SECURITY_ENCRYPT:
- return rxkad_verify_packet_2(call, skb, offset, len, seq, req);
+ ret = rxkad_verify_packet_2(call, skb, seq, req);
+ break;
default:
- return -ENOANO;
+ ret = -ENOANO;
+ break;
}
+ skcipher_request_free(req);
+ return ret;
+
protocol_error:
if (aborted)
rxrpc_send_abort_packet(call);
@@ -678,52 +657,6 @@ protocol_error:
}
/*
- * Locate the data contained in a packet that was partially encrypted.
- */
-static void rxkad_locate_data_1(struct rxrpc_call *call, struct sk_buff *skb,
- unsigned int *_offset, unsigned int *_len)
-{
- struct rxkad_level1_hdr sechdr;
-
- if (skb_copy_bits(skb, *_offset, &sechdr, sizeof(sechdr)) < 0)
- BUG();
- *_offset += sizeof(sechdr);
- *_len = ntohl(sechdr.data_size) & 0xffff;
-}
-
-/*
- * Locate the data contained in a packet that was completely encrypted.
- */
-static void rxkad_locate_data_2(struct rxrpc_call *call, struct sk_buff *skb,
- unsigned int *_offset, unsigned int *_len)
-{
- struct rxkad_level2_hdr sechdr;
-
- if (skb_copy_bits(skb, *_offset, &sechdr, sizeof(sechdr)) < 0)
- BUG();
- *_offset += sizeof(sechdr);
- *_len = ntohl(sechdr.data_size) & 0xffff;
-}
-
-/*
- * Locate the data contained in an already decrypted packet.
- */
-static void rxkad_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
- unsigned int *_offset, unsigned int *_len)
-{
- switch (call->conn->params.security_level) {
- case RXRPC_SECURITY_AUTH:
- rxkad_locate_data_1(call, skb, _offset, _len);
- return;
- case RXRPC_SECURITY_ENCRYPT:
- rxkad_locate_data_2(call, skb, _offset, _len);
- return;
- default:
- return;
- }
-}
-
-/*
* issue a challenge
*/
static int rxkad_issue_challenge(struct rxrpc_connection *conn)
@@ -1234,7 +1167,6 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
abort_code = RXKADPACKETSHORT;
if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header) + sizeof(*response),
ticket, ticket_len) < 0)
- goto protocol_error_free;
ret = rxkad_decrypt_ticket(conn, server_key, skb, ticket, ticket_len,
&session_key, &expiry, _abort_code);
@@ -1397,7 +1329,6 @@ const struct rxrpc_security rxkad = {
.secure_packet = rxkad_secure_packet,
.verify_packet = rxkad_verify_packet,
.free_call_crypto = rxkad_free_call_crypto,
- .locate_data = rxkad_locate_data,
.issue_challenge = rxkad_issue_challenge,
.respond_to_challenge = rxkad_respond_to_challenge,
.verify_response = rxkad_verify_response,
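
Both rxkad_secure_packet_auth() and rxkad_secure_packet_encrypt() above now pad the security header plus payload in place in the txbuf before encrypting. The padding rule is a plain round-up-to-alignment with zero fill; a standalone sketch of that computation (RXKAD_ALIGN is assumed to be 8 and must be a power of two for the mask trick to work):

#include <stddef.h>
#include <string.h>

#define RXKAD_ALIGN	8	/* assumed value; must be a power of two */

/* Zero-pad a buffer holding len bytes up to the next RXKAD_ALIGN boundary,
 * returning the padded length - the same computation as the hunks above. */
static size_t rxkad_pad(unsigned char *buf, size_t len)
{
	size_t pad = (RXKAD_ALIGN - len) & (RXKAD_ALIGN - 1);

	memset(buf + len, 0, pad);
	return len + pad;
}
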
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 3c3a626459de..e5fd8a95bf71 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -22,10 +22,26 @@
*/
static bool rxrpc_check_tx_space(struct rxrpc_call *call, rxrpc_seq_t *_tx_win)
{
- unsigned int win_size =
- min_t(unsigned int, call->tx_winsize,
- call->cong_cwnd + call->cong_extra);
- rxrpc_seq_t tx_win = READ_ONCE(call->tx_hard_ack);
+ unsigned int win_size;
+ rxrpc_seq_t tx_win = smp_load_acquire(&call->acks_hard_ack);
+
+ /* If we haven't transmitted anything for >1RTT, we should reset the
+ * congestion management state.
+ */
+ if (ktime_before(ktime_add_us(call->tx_last_sent,
+ call->peer->srtt_us >> 3),
+ ktime_get_real())) {
+ if (RXRPC_TX_SMSS > 2190)
+ win_size = 2;
+ else if (RXRPC_TX_SMSS > 1095)
+ win_size = 3;
+ else
+ win_size = 4;
+ win_size += call->cong_extra;
+ } else {
+ win_size = min_t(unsigned int, call->tx_winsize,
+ call->cong_cwnd + call->cong_extra);
+ }
if (_tx_win)
*_tx_win = tx_win;
@@ -50,7 +66,12 @@ static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx,
if (signal_pending(current))
return sock_intr_errno(*timeo);
- trace_rxrpc_transmit(call, rxrpc_transmit_wait);
+ if (READ_ONCE(call->acks_hard_ack) != call->tx_bottom) {
+ rxrpc_shrink_call_tx_buffer(call);
+ continue;
+ }
+
+ trace_rxrpc_txqueue(call, rxrpc_txqueue_wait);
*timeo = schedule_timeout(*timeo);
}
}
@@ -71,12 +92,11 @@ static int rxrpc_wait_for_tx_window_waitall(struct rxrpc_sock *rx,
rtt = 2;
timeout = rtt;
- tx_start = READ_ONCE(call->tx_hard_ack);
+ tx_start = smp_load_acquire(&call->acks_hard_ack);
for (;;) {
set_current_state(TASK_UNINTERRUPTIBLE);
- tx_win = READ_ONCE(call->tx_hard_ack);
if (rxrpc_check_tx_space(call, &tx_win))
return 0;
@@ -87,12 +107,17 @@ static int rxrpc_wait_for_tx_window_waitall(struct rxrpc_sock *rx,
tx_win == tx_start && signal_pending(current))
return -EINTR;
+ if (READ_ONCE(call->acks_hard_ack) != call->tx_bottom) {
+ rxrpc_shrink_call_tx_buffer(call);
+ continue;
+ }
+
if (tx_win != tx_start) {
timeout = rtt;
tx_start = tx_win;
}
- trace_rxrpc_transmit(call, rxrpc_transmit_wait);
+ trace_rxrpc_txqueue(call, rxrpc_txqueue_wait);
timeout = schedule_timeout(timeout);
}
}
@@ -112,7 +137,12 @@ static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx,
if (call->state >= RXRPC_CALL_COMPLETE)
return call->error;
- trace_rxrpc_transmit(call, rxrpc_transmit_wait);
+ if (READ_ONCE(call->acks_hard_ack) != call->tx_bottom) {
+ rxrpc_shrink_call_tx_buffer(call);
+ continue;
+ }
+
+ trace_rxrpc_txqueue(call, rxrpc_txqueue_wait);
*timeo = schedule_timeout(*timeo);
}
}
@@ -129,8 +159,8 @@ static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
DECLARE_WAITQUEUE(myself, current);
int ret;
- _enter(",{%u,%u,%u}",
- call->tx_hard_ack, call->tx_top, call->tx_winsize);
+ _enter(",{%u,%u,%u,%u}",
+ call->tx_bottom, call->acks_hard_ack, call->tx_top, call->tx_winsize);
add_wait_queue(&call->waitq, &myself);
@@ -155,24 +185,6 @@ static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
}
/*
- * Schedule an instant Tx resend.
- */
-static inline void rxrpc_instant_resend(struct rxrpc_call *call, int ix)
-{
- spin_lock_bh(&call->lock);
-
- if (call->state < RXRPC_CALL_COMPLETE) {
- call->rxtx_annotations[ix] =
- (call->rxtx_annotations[ix] & RXRPC_TX_ANNO_LAST) |
- RXRPC_TX_ANNO_RETRANS;
- if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
- rxrpc_queue_call(call);
- }
-
- spin_unlock_bh(&call->lock);
-}
-
-/*
* Notify the owner of the call that the transmit phase is ended and the last
* packet has been queued.
*/
@@ -188,38 +200,35 @@ static void rxrpc_notify_end_tx(struct rxrpc_sock *rx, struct rxrpc_call *call,
* the packet immediately. Returns the error from rxrpc_send_data_packet()
* in case the caller wants to do something with it.
*/
-static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
- struct sk_buff *skb, bool last,
- rxrpc_notify_end_tx_t notify_end_tx)
+static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
+ struct rxrpc_txbuf *txb,
+ rxrpc_notify_end_tx_t notify_end_tx)
{
- struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
unsigned long now;
- rxrpc_seq_t seq = sp->hdr.seq;
- int ret, ix;
- u8 annotation = RXRPC_TX_ANNO_UNACK;
+ rxrpc_seq_t seq = txb->seq;
+ bool last = test_bit(RXRPC_TXBUF_LAST, &txb->flags);
+ int ret;
- _net("queue skb %p [%d]", skb, seq);
+ rxrpc_inc_stat(call->rxnet, stat_tx_data);
ASSERTCMP(seq, ==, call->tx_top + 1);
- if (last)
- annotation |= RXRPC_TX_ANNO_LAST;
-
/* We have to set the timestamp before queueing as the retransmit
* algorithm can see the packet as soon as we queue it.
*/
- skb->tstamp = ktime_get_real();
+ txb->last_sent = ktime_get_real();
- ix = seq & RXRPC_RXTX_BUFF_MASK;
- rxrpc_get_skb(skb, rxrpc_skb_got);
- call->rxtx_annotations[ix] = annotation;
- smp_wmb();
- call->rxtx_buffer[ix] = skb;
+ /* Add the packet to the call's output buffer */
+ rxrpc_get_txbuf(txb, rxrpc_txbuf_get_buffer);
+ spin_lock(&call->tx_lock);
+ list_add_tail(&txb->call_link, &call->tx_buffer);
call->tx_top = seq;
+ spin_unlock(&call->tx_lock);
+
if (last)
- trace_rxrpc_transmit(call, rxrpc_transmit_queue_last);
+ trace_rxrpc_txqueue(call, rxrpc_txqueue_queue_last);
else
- trace_rxrpc_transmit(call, rxrpc_transmit_queue);
+ trace_rxrpc_txqueue(call, rxrpc_txqueue_queue);
if (last || call->state == RXRPC_CALL_SERVER_ACK_REQUEST) {
_debug("________awaiting reply/ACK__________");
@@ -232,7 +241,7 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
case RXRPC_CALL_SERVER_ACK_REQUEST:
call->state = RXRPC_CALL_SERVER_SEND_REPLY;
now = jiffies;
- WRITE_ONCE(call->ack_at, now + MAX_JIFFY_OFFSET);
+ WRITE_ONCE(call->delay_ack_at, now + MAX_JIFFY_OFFSET);
if (call->ackr_reason == RXRPC_ACK_DELAY)
call->ackr_reason = 0;
trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now);
@@ -252,7 +261,7 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
if (seq == 1 && rxrpc_is_client_call(call))
rxrpc_expose_client_call(call);
- ret = rxrpc_send_data_packet(call, skb, false);
+ ret = rxrpc_send_data_packet(call, txb);
if (ret < 0) {
switch (ret) {
case -ENETUNREACH:
@@ -262,8 +271,6 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
0, ret);
goto out;
}
- _debug("need instant resend %d", ret);
- rxrpc_instant_resend(call, ix);
} else {
unsigned long now = jiffies;
unsigned long resend_at = now + call->peer->rto_j;
@@ -274,9 +281,7 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
}
out:
- rxrpc_free_skb(skb, rxrpc_skb_freed);
- _leave(" = %d", ret);
- return ret;
+ rxrpc_put_txbuf(txb, rxrpc_txbuf_put_trans);
}
/*
@@ -290,8 +295,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
rxrpc_notify_end_tx_t notify_end_tx,
bool *_dropped_lock)
{
- struct rxrpc_skb_priv *sp;
- struct sk_buff *skb;
+ struct rxrpc_txbuf *txb;
struct sock *sk = &rx->sk;
enum rxrpc_call_state state;
long timeo;
@@ -325,16 +329,15 @@ reload:
goto maybe_error;
}
- skb = call->tx_pending;
+ txb = call->tx_pending;
call->tx_pending = NULL;
- rxrpc_see_skb(skb, rxrpc_skb_seen);
+ if (txb)
+ rxrpc_see_txbuf(txb, rxrpc_txbuf_see_send_more);
do {
- /* Check to see if there's a ping ACK to reply to. */
- if (call->ackr_reason == RXRPC_ACK_PING_RESPONSE)
- rxrpc_send_ack_packet(call, false, NULL);
+ rxrpc_transmit_ack_packets(call->peer->local);
- if (!skb) {
+ if (!txb) {
size_t remain, bufsize, chunk, offset;
_debug("alloc");
@@ -355,53 +358,31 @@ reload:
_debug("SIZE: %zu/%zu @%zu", chunk, bufsize, offset);
/* create a buffer that we can retain until it's ACK'd */
- skb = sock_alloc_send_skb(
- sk, bufsize, msg->msg_flags & MSG_DONTWAIT, &ret);
- if (!skb)
+ ret = -ENOMEM;
+ txb = rxrpc_alloc_txbuf(call, RXRPC_PACKET_TYPE_DATA,
+ GFP_KERNEL);
+ if (!txb)
goto maybe_error;
- sp = rxrpc_skb(skb);
- sp->rx_flags |= RXRPC_SKB_TX_BUFFER;
- rxrpc_new_skb(skb, rxrpc_skb_new);
-
- _debug("ALLOC SEND %p", skb);
-
- ASSERTCMP(skb->mark, ==, 0);
-
- __skb_put(skb, offset);
-
- sp->remain = chunk;
- if (sp->remain > skb_tailroom(skb))
- sp->remain = skb_tailroom(skb);
-
- _net("skb: hr %d, tr %d, hl %d, rm %d",
- skb_headroom(skb),
- skb_tailroom(skb),
- skb_headlen(skb),
- sp->remain);
-
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ txb->offset = offset;
+ txb->space -= offset;
+ txb->space = min_t(size_t, chunk, txb->space);
}
_debug("append");
- sp = rxrpc_skb(skb);
/* append next segment of data to the current buffer */
if (msg_data_left(msg) > 0) {
- int copy = skb_tailroom(skb);
- ASSERTCMP(copy, >, 0);
- if (copy > msg_data_left(msg))
- copy = msg_data_left(msg);
- if (copy > sp->remain)
- copy = sp->remain;
-
- _debug("add");
- ret = skb_add_data(skb, &msg->msg_iter, copy);
- _debug("added");
- if (ret < 0)
+ size_t copy = min_t(size_t, txb->space, msg_data_left(msg));
+
+ _debug("add %zu", copy);
+ if (!copy_from_iter_full(txb->data + txb->offset, copy,
+ &msg->msg_iter))
goto efault;
- sp->remain -= copy;
- skb->mark += copy;
+ _debug("added");
+ txb->space -= copy;
+ txb->len += copy;
+ txb->offset += copy;
copied += copy;
if (call->tx_total_len != -1)
call->tx_total_len -= copy;
@@ -413,32 +394,22 @@ reload:
goto call_terminated;
/* add the packet to the send queue if it's now full */
- if (sp->remain <= 0 ||
+ if (!txb->space ||
(msg_data_left(msg) == 0 && !more)) {
- struct rxrpc_connection *conn = call->conn;
- uint32_t seq;
-
- seq = call->tx_top + 1;
-
- sp->hdr.seq = seq;
- sp->hdr._rsvd = 0;
- sp->hdr.flags = conn->out_clientflag;
-
- if (msg_data_left(msg) == 0 && !more)
- sp->hdr.flags |= RXRPC_LAST_PACKET;
- else if (call->tx_top - call->tx_hard_ack <
+ if (msg_data_left(msg) == 0 && !more) {
+ txb->wire.flags |= RXRPC_LAST_PACKET;
+ __set_bit(RXRPC_TXBUF_LAST, &txb->flags);
+ }
+ else if (call->tx_top - call->acks_hard_ack <
call->tx_winsize)
- sp->hdr.flags |= RXRPC_MORE_PACKETS;
+ txb->wire.flags |= RXRPC_MORE_PACKETS;
- ret = call->security->secure_packet(call, skb, skb->mark);
+ ret = call->security->secure_packet(call, txb);
if (ret < 0)
goto out;
- ret = rxrpc_queue_packet(rx, call, skb,
- !msg_data_left(msg) && !more,
- notify_end_tx);
- /* Should check for failure here */
- skb = NULL;
+ rxrpc_queue_packet(rx, call, txb, notify_end_tx);
+ txb = NULL;
}
} while (msg_data_left(msg) > 0);
@@ -451,12 +422,12 @@ success:
read_unlock_bh(&call->state_lock);
}
out:
- call->tx_pending = skb;
+ call->tx_pending = txb;
_leave(" = %d", ret);
return ret;
call_terminated:
- rxrpc_free_skb(skb, rxrpc_skb_freed);
+ rxrpc_put_txbuf(txb, rxrpc_txbuf_put_send_aborted);
_leave(" = %d", call->error);
return call->error;
@@ -645,7 +616,6 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
*/
int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
__releases(&rx->sk.sk_lock.slock)
- __releases(&call->user_mutex)
{
enum rxrpc_call_state state;
struct rxrpc_call *call;
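
The rxrpc_check_tx_space() hunk above reinstates, per transmission, the SMSS-sized initial window that this series removed from rxrpc_alloc_peer(): if the call has been idle for longer than one smoothed RTT, the usable window collapses back to an RFC 5681-style initial value, otherwise the congestion window applies. A standalone sketch of that selection, with illustrative parameter names rather than the kernel's own:

#include <stdbool.h>

/* Choose the usable transmit window, mirroring the logic added to
 * rxrpc_check_tx_space() above. */
static unsigned int tx_window(bool idle_longer_than_srtt, unsigned int smss,
			      unsigned int cong_cwnd, unsigned int cong_extra,
			      unsigned int tx_winsize)
{
	unsigned int win;

	if (idle_longer_than_srtt) {
		/* Reset to an initial window sized by the sender MSS. */
		if (smss > 2190)
			win = 2;
		else if (smss > 1095)
			win = 3;
		else
			win = 4;
		win += cong_extra;
	} else {
		win = cong_cwnd + cong_extra;
		if (win > tx_winsize)
			win = tx_winsize;
	}
	return win;
}
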
diff --git a/net/rxrpc/skbuff.c b/net/rxrpc/skbuff.c
index 580a5acffee7..0c827d5bb2b8 100644
--- a/net/rxrpc/skbuff.c
+++ b/net/rxrpc/skbuff.c
@@ -14,8 +14,7 @@
#include <net/af_rxrpc.h>
#include "ar-internal.h"
-#define is_tx_skb(skb) (rxrpc_skb(skb)->rx_flags & RXRPC_SKB_TX_BUFFER)
-#define select_skb_count(skb) (is_tx_skb(skb) ? &rxrpc_n_tx_skbs : &rxrpc_n_rx_skbs)
+#define select_skb_count(skb) (&rxrpc_n_rx_skbs)
/*
* Note the allocation or reception of a socket buffer.
@@ -24,8 +23,7 @@ void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
const void *here = __builtin_return_address(0);
int n = atomic_inc_return(select_skb_count(skb));
- trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
- rxrpc_skb(skb)->rx_flags, here);
+ trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
}
/*
@@ -36,8 +34,7 @@ void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
const void *here = __builtin_return_address(0);
if (skb) {
int n = atomic_read(select_skb_count(skb));
- trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
- rxrpc_skb(skb)->rx_flags, here);
+ trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
}
}
@@ -48,8 +45,7 @@ void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
const void *here = __builtin_return_address(0);
int n = atomic_inc_return(select_skb_count(skb));
- trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
- rxrpc_skb(skb)->rx_flags, here);
+ trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
skb_get(skb);
}
@@ -60,7 +56,7 @@ void rxrpc_eaten_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
const void *here = __builtin_return_address(0);
int n = atomic_inc_return(&rxrpc_n_rx_skbs);
- trace_rxrpc_skb(skb, op, 0, n, 0, here);
+ trace_rxrpc_skb(skb, op, 0, n, here);
}
/*
@@ -72,8 +68,7 @@ void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
if (skb) {
int n;
n = atomic_dec_return(select_skb_count(skb));
- trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
- rxrpc_skb(skb)->rx_flags, here);
+ trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
kfree_skb(skb);
}
}
@@ -88,8 +83,7 @@ void rxrpc_purge_queue(struct sk_buff_head *list)
while ((skb = skb_dequeue((list))) != NULL) {
int n = atomic_dec_return(select_skb_count(skb));
trace_rxrpc_skb(skb, rxrpc_skb_purged,
- refcount_read(&skb->users), n,
- rxrpc_skb(skb)->rx_flags, here);
+ refcount_read(&skb->users), n, here);
kfree_skb(skb);
}
}
diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c
index 555e0910786b..cde3224a5cd2 100644
--- a/net/rxrpc/sysctl.c
+++ b/net/rxrpc/sysctl.c
@@ -14,7 +14,7 @@ static struct ctl_table_header *rxrpc_sysctl_reg_table;
static const unsigned int four = 4;
static const unsigned int max_backlog = RXRPC_BACKLOG_MAX - 1;
static const unsigned int n_65535 = 65535;
-static const unsigned int n_max_acks = RXRPC_RXTX_BUFF_SIZE - 1;
+static const unsigned int n_max_acks = 255;
static const unsigned long one_jiffy = 1;
static const unsigned long max_jiffies = MAX_JIFFY_OFFSET;
@@ -27,15 +27,6 @@ static const unsigned long max_jiffies = MAX_JIFFY_OFFSET;
static struct ctl_table rxrpc_sysctl_table[] = {
/* Values measured in milliseconds but used in jiffies */
{
- .procname = "req_ack_delay",
- .data = &rxrpc_requested_ack_delay,
- .maxlen = sizeof(unsigned long),
- .mode = 0644,
- .proc_handler = proc_doulongvec_ms_jiffies_minmax,
- .extra1 = (void *)&one_jiffy,
- .extra2 = (void *)&max_jiffies,
- },
- {
.procname = "soft_ack_delay",
.data = &rxrpc_soft_ack_delay,
.maxlen = sizeof(unsigned long),
diff --git a/net/rxrpc/txbuf.c b/net/rxrpc/txbuf.c
new file mode 100644
index 000000000000..96bfee89927b
--- /dev/null
+++ b/net/rxrpc/txbuf.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* RxRPC Tx data buffering.
+ *
+ * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include "ar-internal.h"
+
+static atomic_t rxrpc_txbuf_debug_ids;
+atomic_t rxrpc_nr_txbuf;
+
+/*
+ * Allocate and partially initialise an I/O request structure.
+ */
+struct rxrpc_txbuf *rxrpc_alloc_txbuf(struct rxrpc_call *call, u8 packet_type,
+ gfp_t gfp)
+{
+ struct rxrpc_txbuf *txb;
+
+ txb = kmalloc(sizeof(*txb), gfp);
+ if (txb) {
+ INIT_LIST_HEAD(&txb->call_link);
+ INIT_LIST_HEAD(&txb->tx_link);
+ refcount_set(&txb->ref, 1);
+ txb->call = call;
+ txb->call_debug_id = call->debug_id;
+ txb->debug_id = atomic_inc_return(&rxrpc_txbuf_debug_ids);
+ txb->space = sizeof(txb->data);
+ txb->len = 0;
+ txb->offset = 0;
+ txb->flags = 0;
+ txb->ack_why = 0;
+ txb->seq = call->tx_top + 1;
+ txb->wire.epoch = htonl(call->conn->proto.epoch);
+ txb->wire.cid = htonl(call->cid);
+ txb->wire.callNumber = htonl(call->call_id);
+ txb->wire.seq = htonl(txb->seq);
+ txb->wire.type = packet_type;
+ txb->wire.flags = call->conn->out_clientflag;
+ txb->wire.userStatus = 0;
+ txb->wire.securityIndex = call->security_ix;
+ txb->wire._rsvd = 0;
+ txb->wire.serviceId = htons(call->service_id);
+
+ trace_rxrpc_txbuf(txb->debug_id,
+ txb->call_debug_id, txb->seq, 1,
+ packet_type == RXRPC_PACKET_TYPE_DATA ?
+ rxrpc_txbuf_alloc_data :
+ rxrpc_txbuf_alloc_ack);
+ atomic_inc(&rxrpc_nr_txbuf);
+ }
+
+ return txb;
+}
+
+void rxrpc_get_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what)
+{
+ int r;
+
+ __refcount_inc(&txb->ref, &r);
+ trace_rxrpc_txbuf(txb->debug_id, txb->call_debug_id, txb->seq, r + 1, what);
+}
+
+void rxrpc_see_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what)
+{
+ int r = refcount_read(&txb->ref);
+
+ trace_rxrpc_txbuf(txb->debug_id, txb->call_debug_id, txb->seq, r, what);
+}
+
+static void rxrpc_free_txbuf(struct rcu_head *rcu)
+{
+ struct rxrpc_txbuf *txb = container_of(rcu, struct rxrpc_txbuf, rcu);
+
+ trace_rxrpc_txbuf(txb->debug_id, txb->call_debug_id, txb->seq, 0,
+ rxrpc_txbuf_free);
+ kfree(txb);
+ atomic_dec(&rxrpc_nr_txbuf);
+}
+
+void rxrpc_put_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what)
+{
+ unsigned int debug_id, call_debug_id;
+ rxrpc_seq_t seq;
+ bool dead;
+ int r;
+
+ if (txb) {
+ debug_id = txb->debug_id;
+ call_debug_id = txb->call_debug_id;
+ seq = txb->seq;
+ dead = __refcount_dec_and_test(&txb->ref, &r);
+ trace_rxrpc_txbuf(debug_id, call_debug_id, seq, r - 1, what);
+ if (dead)
+ call_rcu(&txb->rcu, rxrpc_free_txbuf);
+ }
+}
+
+/*
+ * Shrink the transmit buffer.
+ */
+void rxrpc_shrink_call_tx_buffer(struct rxrpc_call *call)
+{
+ struct rxrpc_txbuf *txb;
+ rxrpc_seq_t hard_ack = smp_load_acquire(&call->acks_hard_ack);
+
+ _enter("%x/%x/%x", call->tx_bottom, call->acks_hard_ack, call->tx_top);
+
+ for (;;) {
+ spin_lock(&call->tx_lock);
+ txb = list_first_entry_or_null(&call->tx_buffer,
+ struct rxrpc_txbuf, call_link);
+ if (!txb)
+ break;
+ hard_ack = smp_load_acquire(&call->acks_hard_ack);
+ if (before(hard_ack, txb->seq))
+ break;
+
+ ASSERTCMP(txb->seq, ==, call->tx_bottom + 1);
+ call->tx_bottom++;
+ list_del_rcu(&txb->call_link);
+
+ trace_rxrpc_txqueue(call, rxrpc_txqueue_dequeue);
+
+ spin_unlock(&call->tx_lock);
+
+ rxrpc_put_txbuf(txb, rxrpc_txbuf_put_rotated);
+ }
+
+ spin_unlock(&call->tx_lock);
+}
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index b38d91d6b249..da0b7f665277 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -33,6 +33,7 @@
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <net/netfilter/nf_conntrack_act_ct.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
#include <uapi/linux/netfilter/nf_nat.h>
static struct workqueue_struct *act_ct_wq;
@@ -345,11 +346,9 @@ static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
module_put(THIS_MODULE);
}
-static void tcf_ct_flow_table_put(struct tcf_ct_params *params)
+static void tcf_ct_flow_table_put(struct tcf_ct_flow_table *ct_ft)
{
- struct tcf_ct_flow_table *ct_ft = params->ct_ft;
-
- if (refcount_dec_and_test(&params->ct_ft->ref)) {
+ if (refcount_dec_and_test(&ct_ft->ref)) {
rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work);
queue_rcu_work(act_ct_wq, &ct_ft->rwork);
@@ -657,7 +656,7 @@ struct tc_ct_action_net {
/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb,
- u16 zone_id, bool force)
+ struct tcf_ct_params *p)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
@@ -667,11 +666,19 @@ static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb,
return false;
if (!net_eq(net, read_pnet(&ct->ct_net)))
goto drop_ct;
- if (nf_ct_zone(ct)->id != zone_id)
+ if (nf_ct_zone(ct)->id != p->zone)
goto drop_ct;
+ if (p->helper) {
+ struct nf_conn_help *help;
+
+ help = nf_ct_ext_find(ct, NF_CT_EXT_HELPER);
+ if (help && rcu_access_pointer(help->helper) != p->helper)
+ goto drop_ct;
+ }
/* Force conntrack entry direction. */
- if (force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
+ if ((p->ct_action & TCA_CT_ACT_FORCE) &&
+ CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
if (nf_ct_is_confirmed(ct))
nf_ct_kill(ct);
@@ -832,18 +839,30 @@ out_free:
return err;
}
-static void tcf_ct_params_free(struct rcu_head *head)
+static void tcf_ct_params_free(struct tcf_ct_params *params)
{
- struct tcf_ct_params *params = container_of(head,
- struct tcf_ct_params, rcu);
-
- tcf_ct_flow_table_put(params);
-
+ if (params->helper) {
+#if IS_ENABLED(CONFIG_NF_NAT)
+ if (params->ct_action & TCA_CT_ACT_NAT)
+ nf_nat_helper_put(params->helper);
+#endif
+ nf_conntrack_helper_put(params->helper);
+ }
+ if (params->ct_ft)
+ tcf_ct_flow_table_put(params->ct_ft);
if (params->tmpl)
nf_ct_put(params->tmpl);
kfree(params);
}
+static void tcf_ct_params_free_rcu(struct rcu_head *head)
+{
+ struct tcf_ct_params *params;
+
+ params = container_of(head, struct tcf_ct_params, rcu);
+ tcf_ct_params_free(params);
+}
+
#if IS_ENABLED(CONFIG_NF_NAT)
/* Modelled after nf_nat_ipv[46]_fn().
* range is only used for new, uninitialized NAT state.
@@ -1023,13 +1042,14 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
struct net *net = dev_net(skb->dev);
- bool cached, commit, clear, force;
enum ip_conntrack_info ctinfo;
struct tcf_ct *c = to_ct(a);
struct nf_conn *tmpl = NULL;
struct nf_hook_state state;
+ bool cached, commit, clear;
int nh_ofs, err, retval;
struct tcf_ct_params *p;
+ bool add_helper = false;
bool skip_add = false;
bool defrag = false;
struct nf_conn *ct;
@@ -1040,7 +1060,6 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
retval = READ_ONCE(c->tcf_action);
commit = p->ct_action & TCA_CT_ACT_COMMIT;
clear = p->ct_action & TCA_CT_ACT_CLEAR;
- force = p->ct_action & TCA_CT_ACT_FORCE;
tmpl = p->tmpl;
tcf_lastuse_update(&c->tcf_tm);
@@ -1083,7 +1102,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
* actually run the packet through conntrack twice unless it's for a
* different zone.
*/
- cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
+ cached = tcf_ct_skb_nfct_cached(net, skb, p);
if (!cached) {
if (tcf_ct_flow_table_lookup(p, skb, family)) {
skip_add = true;
@@ -1116,6 +1135,22 @@ do_nat:
if (err != NF_ACCEPT)
goto drop;
+ if (!nf_ct_is_confirmed(ct) && commit && p->helper && !nfct_help(ct)) {
+ err = __nf_ct_try_assign_helper(ct, p->tmpl, GFP_ATOMIC);
+ if (err)
+ goto drop;
+ add_helper = true;
+ if (p->ct_action & TCA_CT_ACT_NAT && !nfct_seqadj(ct)) {
+ if (!nfct_seqadj_ext_add(ct))
+ goto drop;
+ }
+ }
+
+ if (nf_ct_is_confirmed(ct) ? ((!cached && !skip_add) || add_helper) : commit) {
+ if (nf_ct_helper(skb, ct, ctinfo, family) != NF_ACCEPT)
+ goto drop;
+ }
+
if (commit) {
tcf_ct_act_set_mark(ct, p->mark, p->mark_mask);
tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);
@@ -1164,6 +1199,9 @@ static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = {
[TCA_CT_NAT_IPV6_MAX] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
[TCA_CT_NAT_PORT_MIN] = { .type = NLA_U16 },
[TCA_CT_NAT_PORT_MAX] = { .type = NLA_U16 },
+ [TCA_CT_HELPER_NAME] = { .type = NLA_STRING, .len = NF_CT_HELPER_NAME_LEN },
+ [TCA_CT_HELPER_FAMILY] = { .type = NLA_U8 },
+ [TCA_CT_HELPER_PROTO] = { .type = NLA_U8 },
};
static int tcf_ct_fill_params_nat(struct tcf_ct_params *p,
@@ -1253,8 +1291,9 @@ static int tcf_ct_fill_params(struct net *net,
{
struct tc_ct_action_net *tn = net_generic(net, act_ct_ops.net_id);
struct nf_conntrack_zone zone;
+ int err, family, proto, len;
struct nf_conn *tmpl;
- int err;
+ char *name;
p->zone = NF_CT_DEFAULT_ZONE_ID;
@@ -1315,10 +1354,31 @@ static int tcf_ct_fill_params(struct net *net,
NL_SET_ERR_MSG_MOD(extack, "Failed to allocate conntrack template");
return -ENOMEM;
}
- __set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
p->tmpl = tmpl;
+ if (tb[TCA_CT_HELPER_NAME]) {
+ name = nla_data(tb[TCA_CT_HELPER_NAME]);
+ len = nla_len(tb[TCA_CT_HELPER_NAME]);
+ if (len > 16 || name[len - 1] != '\0') {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to parse helper name.");
+ err = -EINVAL;
+ goto err;
+ }
+ family = tb[TCA_CT_HELPER_FAMILY] ? nla_get_u8(tb[TCA_CT_HELPER_FAMILY]) : AF_INET;
+ proto = tb[TCA_CT_HELPER_PROTO] ? nla_get_u8(tb[TCA_CT_HELPER_PROTO]) : IPPROTO_TCP;
+ err = nf_ct_add_helper(tmpl, name, family, proto,
+ p->ct_action & TCA_CT_ACT_NAT, &p->helper);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to add helper");
+ goto err;
+ }
+ }
+ __set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
return 0;
+err:
+ nf_ct_put(p->tmpl);
+ p->tmpl = NULL;
+ return err;
}
static int tcf_ct_init(struct net *net, struct nlattr *nla,
@@ -1390,7 +1450,7 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
err = tcf_ct_flow_table_get(net, params);
if (err)
- goto cleanup_params;
+ goto cleanup;
spin_lock_bh(&c->tcf_lock);
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
@@ -1401,17 +1461,15 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
if (goto_ch)
tcf_chain_put_by_act(goto_ch);
if (params)
- call_rcu(&params->rcu, tcf_ct_params_free);
+ call_rcu(&params->rcu, tcf_ct_params_free_rcu);
return res;
-cleanup_params:
- if (params->tmpl)
- nf_ct_put(params->tmpl);
cleanup:
if (goto_ch)
tcf_chain_put_by_act(goto_ch);
- kfree(params);
+ if (params)
+ tcf_ct_params_free(params);
tcf_idr_release(*a, bind);
return err;
}
@@ -1423,7 +1481,7 @@ static void tcf_ct_cleanup(struct tc_action *a)
params = rcu_dereference_protected(c->params, 1);
if (params)
- call_rcu(&params->rcu, tcf_ct_params_free);
+ call_rcu(&params->rcu, tcf_ct_params_free_rcu);
}
static int tcf_ct_dump_key_val(struct sk_buff *skb,
@@ -1489,6 +1547,19 @@ static int tcf_ct_dump_nat(struct sk_buff *skb, struct tcf_ct_params *p)
return 0;
}
+static int tcf_ct_dump_helper(struct sk_buff *skb, struct nf_conntrack_helper *helper)
+{
+ if (!helper)
+ return 0;
+
+ if (nla_put_string(skb, TCA_CT_HELPER_NAME, helper->name) ||
+ nla_put_u8(skb, TCA_CT_HELPER_FAMILY, helper->tuple.src.l3num) ||
+ nla_put_u8(skb, TCA_CT_HELPER_PROTO, helper->tuple.dst.protonum))
+ return -1;
+
+ return 0;
+}
+
static inline int tcf_ct_dump(struct sk_buff *skb, struct tc_action *a,
int bind, int ref)
{
@@ -1541,6 +1612,9 @@ static inline int tcf_ct_dump(struct sk_buff *skb, struct tc_action *a,
if (tcf_ct_dump_nat(skb, p))
goto nla_put_failure;
+ if (tcf_ct_dump_helper(skb, p->helper))
+ goto nla_put_failure;
+
skip_dump:
if (nla_put(skb, TCA_CT_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 7f598784fd30..1710780c908a 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -148,6 +148,11 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
}
if (tb[TCA_SKBEDIT_QUEUE_MAPPING] != NULL) {
+ if (is_tcf_skbedit_ingress(act_flags) &&
+ !(act_flags & TCA_ACT_FLAGS_SKIP_SW)) {
+ NL_SET_ERR_MSG_MOD(extack, "\"queue_mapping\" option on receive side is hardware only, use skip_sw");
+ return -EOPNOTSUPP;
+ }
flags |= SKBEDIT_F_QUEUE_MAPPING;
queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]);
}
@@ -374,9 +379,12 @@ static int tcf_skbedit_offload_act_setup(struct tc_action *act, void *entry_data
} else if (is_tcf_skbedit_priority(act)) {
entry->id = FLOW_ACTION_PRIORITY;
entry->priority = tcf_skbedit_priority(act);
- } else if (is_tcf_skbedit_queue_mapping(act)) {
- NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"queue_mapping\" option is used");
+ } else if (is_tcf_skbedit_tx_queue_mapping(act)) {
+ NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"queue_mapping\" option is used on transmit side");
return -EOPNOTSUPP;
+ } else if (is_tcf_skbedit_rx_queue_mapping(act)) {
+ entry->id = FLOW_ACTION_RX_QUEUE_MAPPING;
+ entry->rx_queue = tcf_skbedit_rx_queue_mapping(act);
} else if (is_tcf_skbedit_inheritdsfield(act)) {
NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"inheritdsfield\" option is used");
return -EOPNOTSUPP;
@@ -394,6 +402,8 @@ static int tcf_skbedit_offload_act_setup(struct tc_action *act, void *entry_data
fl_action->id = FLOW_ACTION_PTYPE;
else if (is_tcf_skbedit_priority(act))
fl_action->id = FLOW_ACTION_PRIORITY;
+ else if (is_tcf_skbedit_rx_queue_mapping(act))
+ fl_action->id = FLOW_ACTION_RX_QUEUE_MAPPING;
else
return -EOPNOTSUPP;
}
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 50566db45949..23d1cfa4f58c 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1953,6 +1953,11 @@ static void tfilter_put(struct tcf_proto *tp, void *fh)
tp->ops->put(tp, fh);
}
+static bool is_qdisc_ingress(__u32 classid)
+{
+ return (TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS));
+}
+
static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
struct netlink_ext_ack *extack)
{
@@ -2144,6 +2149,8 @@ replay:
flags |= TCA_ACT_FLAGS_REPLACE;
if (!rtnl_held)
flags |= TCA_ACT_FLAGS_NO_RTNL;
+ if (is_qdisc_ingress(parent))
+ flags |= TCA_ACT_FLAGS_AT_INGRESS;
err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
flags, extack);
if (err == 0) {
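The ingress test added to tc_new_tfilter() keys off only the minor part of the parent classid. A small stand-alone userspace illustration (not part of the patch) of the same check, using only the TC handle macros from the UAPI header:

	/* cc -o ingress_check ingress_check.c   (illustration only) */
	#include <linux/pkt_sched.h>	/* TC_H_MIN, TC_H_MAKE, TC_H_INGRESS, TC_H_MIN_* */
	#include <stdbool.h>
	#include <stdio.h>

	static bool parent_is_ingress(unsigned int classid)
	{
		/* Only the minor part identifies the ingress/clsact pseudo-class. */
		return TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS);
	}

	int main(void)
	{
		unsigned int ingress = TC_H_MAKE(TC_H_INGRESS, TC_H_MIN_INGRESS);
		unsigned int egress  = TC_H_MAKE(TC_H_INGRESS, TC_H_MIN_EGRESS);

		printf("ingress parent: %d, egress parent: %d\n",
		       parent_is_ingress(ingress), parent_is_ingress(egress));
		return 0;
	}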
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 3460abceba44..63ba5551c13f 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -226,8 +226,7 @@ static struct sctp_association *sctp_association_init(
/* Create an output queue. */
sctp_outq_init(asoc, &asoc->outqueue);
- if (!sctp_ulpq_init(&asoc->ulpq, asoc))
- goto fail_init;
+ sctp_ulpq_init(&asoc->ulpq, asoc);
if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams, 0, gfp))
goto stream_free;
@@ -277,7 +276,6 @@ static struct sctp_association *sctp_association_init(
stream_free:
sctp_stream_free(&asoc->stream);
-fail_init:
sock_put(asoc->base.sk);
sctp_endpoint_put(asoc->ep);
return NULL;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 83628c347744..3e83963d1b8a 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5098,13 +5098,17 @@ static void sctp_destroy_sock(struct sock *sk)
}
/* Triggered when there are no references on the socket anymore */
-static void sctp_destruct_sock(struct sock *sk)
+static void sctp_destruct_common(struct sock *sk)
{
struct sctp_sock *sp = sctp_sk(sk);
/* Free up the HMAC transform. */
crypto_free_shash(sp->hmac);
+}
+static void sctp_destruct_sock(struct sock *sk)
+{
+ sctp_destruct_common(sk);
inet_sock_destruct(sk);
}
@@ -9427,7 +9431,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
sctp_sk(newsk)->reuse = sp->reuse;
newsk->sk_shutdown = sk->sk_shutdown;
- newsk->sk_destruct = sctp_destruct_sock;
+ newsk->sk_destruct = sk->sk_destruct;
newsk->sk_family = sk->sk_family;
newsk->sk_protocol = IPPROTO_SCTP;
newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
@@ -9662,11 +9666,20 @@ struct proto sctp_prot = {
#if IS_ENABLED(CONFIG_IPV6)
-#include <net/transp_v6.h>
-static void sctp_v6_destroy_sock(struct sock *sk)
+static void sctp_v6_destruct_sock(struct sock *sk)
+{
+ sctp_destruct_common(sk);
+ inet6_sock_destruct(sk);
+}
+
+static int sctp_v6_init_sock(struct sock *sk)
{
- sctp_destroy_sock(sk);
- inet6_destroy_sock(sk);
+ int ret = sctp_init_sock(sk);
+
+ if (!ret)
+ sk->sk_destruct = sctp_v6_destruct_sock;
+
+ return ret;
}
struct proto sctpv6_prot = {
@@ -9676,8 +9689,8 @@ struct proto sctpv6_prot = {
.disconnect = sctp_disconnect,
.accept = sctp_accept,
.ioctl = sctp_ioctl,
- .init = sctp_init_sock,
- .destroy = sctp_v6_destroy_sock,
+ .init = sctp_v6_init_sock,
+ .destroy = sctp_destroy_sock,
.shutdown = sctp_shutdown,
.setsockopt = sctp_setsockopt,
.getsockopt = sctp_getsockopt,
diff --git a/net/sctp/stream_interleave.c b/net/sctp/stream_interleave.c
index bb22b71df7a3..94727feb07b3 100644
--- a/net/sctp/stream_interleave.c
+++ b/net/sctp/stream_interleave.c
@@ -490,11 +490,8 @@ static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
goto out_free;
- if (skb_list)
- skb_queue_splice_tail_init(skb_list,
- &sk->sk_receive_queue);
- else
- __skb_queue_tail(&sk->sk_receive_queue, skb);
+ skb_queue_splice_tail_init(skb_list,
+ &sk->sk_receive_queue);
if (!sp->data_ready_signalled) {
sp->data_ready_signalled = 1;
@@ -504,10 +501,7 @@ static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
return 1;
out_free:
- if (skb_list)
- sctp_queue_purge_ulpevents(skb_list);
- else
- sctp_ulpevent_free(event);
+ sctp_queue_purge_ulpevents(skb_list);
return 0;
}
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 0a8510a0c5e6..b05daafd369a 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -38,8 +38,7 @@ static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);
/* 1st Level Abstractions */
/* Initialize a ULP queue from a block of memory. */
-struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
- struct sctp_association *asoc)
+void sctp_ulpq_init(struct sctp_ulpq *ulpq, struct sctp_association *asoc)
{
memset(ulpq, 0, sizeof(struct sctp_ulpq));
@@ -48,8 +47,6 @@ struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
skb_queue_head_init(&ulpq->reasm_uo);
skb_queue_head_init(&ulpq->lobby);
ulpq->pd_mode = 0;
-
- return ulpq;
}
@@ -259,10 +256,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sk_buff_head *skb_list)
return 1;
out_free:
- if (skb_list)
- sctp_queue_purge_ulpevents(skb_list);
- else
- sctp_ulpevent_free(event);
+ sctp_queue_purge_ulpevents(skb_list);
return 0;
}
diff --git a/net/socket.c b/net/socket.c
index 00da9ce3dba0..55c5d536e5f6 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2199,13 +2199,7 @@ SYSCALL_DEFINE4(recv, int, fd, void __user *, ubuf, size_t, size,
static bool sock_use_custom_sol_socket(const struct socket *sock)
{
- const struct sock *sk = sock->sk;
-
- /* Use sock->ops->setsockopt() for MPTCP */
- return IS_ENABLED(CONFIG_MPTCP) &&
- sk->sk_protocol == IPPROTO_MPTCP &&
- sk->sk_type == SOCK_STREAM &&
- (sk->sk_family == AF_INET || sk->sk_family == AF_INET6);
+ return test_bit(SOCK_CUSTOM_SOCKOPT, &sock->flags);
}
/*
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 775e16cb99ed..af85d8909935 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -271,6 +271,8 @@ struct cfg80211_event {
} ij;
struct {
u8 bssid[ETH_ALEN];
+ const u8 *td_bitmap;
+ u8 td_bitmap_len;
} pa;
};
};
@@ -409,7 +411,8 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
bool wextev);
void __cfg80211_roamed(struct wireless_dev *wdev,
struct cfg80211_roam_info *info);
-void __cfg80211_port_authorized(struct wireless_dev *wdev, const u8 *bssid);
+void __cfg80211_port_authorized(struct wireless_dev *wdev, const u8 *bssid,
+ const u8 *td_bitmap, u8 td_bitmap_len);
int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev);
void cfg80211_autodisconnect_wk(struct work_struct *work);
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 581df7f4c524..58e1fb18f85a 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -42,6 +42,10 @@ void cfg80211_rx_assoc_resp(struct net_device *dev,
unsigned int link_id;
for (link_id = 0; link_id < ARRAY_SIZE(data->links); link_id++) {
+ cr.links[link_id].status = data->links[link_id].status;
+ WARN_ON_ONCE(cr.links[link_id].status != WLAN_STATUS_SUCCESS &&
+ (!cr.ap_mld_addr || !cr.links[link_id].bss));
+
cr.links[link_id].bss = data->links[link_id].bss;
if (!cr.links[link_id].bss)
continue;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 597c52236514..1ad0326ff4dc 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -7780,6 +7780,7 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
int err;
memset(&params, 0, sizeof(params));
+ params.link_id = nl80211_link_id_or_invalid(info->attrs);
/* default to not changing parameters */
params.use_cts_prot = -1;
params.use_short_preamble = -1;
@@ -16139,7 +16140,8 @@ static u32 nl80211_internal_flags[] = {
#undef SELECTOR
};
-static int nl80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
+static int nl80211_pre_doit(const struct genl_split_ops *ops,
+ struct sk_buff *skb,
struct genl_info *info)
{
struct cfg80211_registered_device *rdev = NULL;
@@ -16240,7 +16242,8 @@ out_unlock:
return err;
}
-static void nl80211_post_doit(const struct genl_ops *ops, struct sk_buff *skb,
+static void nl80211_post_doit(const struct genl_split_ops *ops,
+ struct sk_buff *skb,
struct genl_info *info)
{
u32 internal_flags = nl80211_internal_flags[ops->internal_flags];
@@ -16566,7 +16569,8 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_set_bss,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
+ .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_MLO_VALID_LINK_ID),
},
{
.cmd = NL80211_CMD_GET_REG,
@@ -17747,6 +17751,7 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
link_info_size += (cr->links[link].bssid ||
cr->links[link].bss) ?
nla_total_size(ETH_ALEN) : 0;
+ link_info_size += nla_total_size(sizeof(u16));
}
}
@@ -17815,7 +17820,9 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
nla_put(msg, NL80211_ATTR_BSSID, ETH_ALEN, bssid)) ||
(cr->links[link].addr &&
nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN,
- cr->links[link].addr)))
+ cr->links[link].addr)) ||
+ nla_put_u16(msg, NL80211_ATTR_STATUS_CODE,
+ cr->links[link].status))
goto nla_put_failure;
nla_nest_end(msg, nested_mlo_links);
@@ -17939,7 +17946,8 @@ void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
}
void nl80211_send_port_authorized(struct cfg80211_registered_device *rdev,
- struct net_device *netdev, const u8 *bssid)
+ struct net_device *netdev, const u8 *bssid,
+ const u8 *td_bitmap, u8 td_bitmap_len)
{
struct sk_buff *msg;
void *hdr;
@@ -17959,6 +17967,11 @@ void nl80211_send_port_authorized(struct cfg80211_registered_device *rdev,
nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid))
goto nla_put_failure;
+ if ((td_bitmap_len > 0) && td_bitmap)
+ if (nla_put(msg, NL80211_ATTR_TD_BITMAP,
+ td_bitmap_len, td_bitmap))
+ goto nla_put_failure;
+
genlmsg_end(msg, hdr);
genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 855d540ddfb9..ba9457e94c43 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -83,7 +83,8 @@ void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
struct net_device *netdev,
struct cfg80211_roam_info *info, gfp_t gfp);
void nl80211_send_port_authorized(struct cfg80211_registered_device *rdev,
- struct net_device *netdev, const u8 *bssid);
+ struct net_device *netdev, const u8 *bssid,
+ const u8 *td_bitmap, u8 td_bitmap_len);
void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
struct net_device *netdev, u16 reason,
const u8 *ie, size_t ie_len, bool from_ap);
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index d513536617bd..4b5b6ee0fe01 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -793,6 +793,10 @@ void __cfg80211_connect_result(struct net_device *dev,
}
for_each_valid_link(cr, link) {
+ /* don't do extra lookups for failures */
+ if (cr->links[link].status != WLAN_STATUS_SUCCESS)
+ continue;
+
if (cr->links[link].bss)
continue;
@@ -829,6 +833,16 @@ void __cfg80211_connect_result(struct net_device *dev,
}
memset(wdev->links, 0, sizeof(wdev->links));
+ for_each_valid_link(cr, link) {
+ if (cr->links[link].status == WLAN_STATUS_SUCCESS)
+ continue;
+ cr->valid_links &= ~BIT(link);
+ /* don't require bss pointer for failed links */
+ if (!cr->links[link].bss)
+ continue;
+ cfg80211_unhold_bss(bss_from_pub(cr->links[link].bss));
+ cfg80211_put_bss(wdev->wiphy, cr->links[link].bss);
+ }
wdev->valid_links = cr->valid_links;
for_each_valid_link(cr, link)
wdev->links[link].client.current_bss =
@@ -1237,7 +1251,8 @@ out:
}
EXPORT_SYMBOL(cfg80211_roamed);
-void __cfg80211_port_authorized(struct wireless_dev *wdev, const u8 *bssid)
+void __cfg80211_port_authorized(struct wireless_dev *wdev, const u8 *bssid,
+ const u8 *td_bitmap, u8 td_bitmap_len)
{
ASSERT_WDEV_LOCK(wdev);
@@ -1250,11 +1265,11 @@ void __cfg80211_port_authorized(struct wireless_dev *wdev, const u8 *bssid)
return;
nl80211_send_port_authorized(wiphy_to_rdev(wdev->wiphy), wdev->netdev,
- bssid);
+ bssid, td_bitmap, td_bitmap_len);
}
void cfg80211_port_authorized(struct net_device *dev, const u8 *bssid,
- gfp_t gfp)
+ const u8 *td_bitmap, u8 td_bitmap_len, gfp_t gfp)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
@@ -1264,12 +1279,15 @@ void cfg80211_port_authorized(struct net_device *dev, const u8 *bssid,
if (WARN_ON(!bssid))
return;
- ev = kzalloc(sizeof(*ev), gfp);
+ ev = kzalloc(sizeof(*ev) + td_bitmap_len, gfp);
if (!ev)
return;
ev->type = EVENT_PORT_AUTHORIZED;
memcpy(ev->pa.bssid, bssid, ETH_ALEN);
+ ev->pa.td_bitmap = ((u8 *)ev) + sizeof(*ev);
+ ev->pa.td_bitmap_len = td_bitmap_len;
+ memcpy((void *)ev->pa.td_bitmap, td_bitmap, td_bitmap_len);
/*
* Use the wdev event list so that if there are pending
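In the sme.c hunk above, the port-authorized event carries the transition-disable bitmap in the same allocation as the event struct, with the pointer aimed at the trailing bytes. A stand-alone sketch of that single-allocation layout, with hypothetical names (my_event, my_event_alloc), shown only to illustrate the pattern:

	#include <linux/slab.h>
	#include <linux/string.h>
	#include <linux/types.h>

	struct my_event {
		const u8 *payload;
		u8 payload_len;
	};

	static struct my_event *my_event_alloc(const u8 *data, u8 len, gfp_t gfp)
	{
		/* One allocation holds the struct plus the trailing payload, so a
		 * single kfree(ev) on the consumer side releases both.
		 */
		struct my_event *ev = kzalloc(sizeof(*ev) + len, gfp);

		if (!ev)
			return NULL;
		ev->payload = (u8 *)ev + sizeof(*ev);
		ev->payload_len = len;
		if (len)
			memcpy((void *)ev->payload, data, len);
		return ev;
	}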
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 39680e7bad45..8f403f9fe816 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -990,7 +990,9 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev)
__cfg80211_leave(wiphy_to_rdev(wdev->wiphy), wdev);
break;
case EVENT_PORT_AUTHORIZED:
- __cfg80211_port_authorized(wdev, ev->pa.bssid);
+ __cfg80211_port_authorized(wdev, ev->pa.bssid,
+ ev->pa.td_bitmap,
+ ev->pa.td_bitmap_len);
break;
}
wdev_unlock(wdev);