author    Jakub Kicinski <kuba@kernel.org>    2025-04-02 18:34:04 -0700
committer Jakub Kicinski <kuba@kernel.org>    2025-04-04 07:35:38 -0700
commit  ec304b70d46bd2ed66541c5b57b63276529e05b1 (patch)
tree    cdf44a3e05d4cb02eaa6b166857cf49d0993df78 /net/core/netdev_rx_queue.c
parent  053f3ff67d7feefc75797863f3d84b47ad47086f (diff)
net: move mp dev config validation to __net_mp_open_rxq()
devmem code performs a number of safety checks to avoid having to
reimplement all of them in the drivers. Move those to __net_mp_open_rxq()
and reuse that function for binding, to make sure that io_uring ZC also
benefits from them.

While at it, rename the queue ID variable to rxq_idx in __net_mp_open_rxq();
we touch most of the relevant lines anyway.

The XArray insertion is reordered after the netdev_rx_queue_restart() call,
otherwise we'd need to duplicate the queue index check or risk inserting an
invalid pointer. XArray allocation failures should be extremely rare.

Reviewed-by: Mina Almasry <almasrymina@google.com>
Acked-by: Stanislav Fomichev <sdf@fomichev.me>
Fixes: 6e18ed929d3b ("net: add helpers for setting a memory provider on an rx queue")
Link: https://patch.msgid.link/20250403013405.2827250-2-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
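For illustration, a minimal caller sketch (not part of this patch) of how a
memory-provider user such as io_uring ZC might bind an RX queue via
net_mp_open_rxq(), which now funnels through the validation added below.
net_mp_open_rxq() and the mp_ops/mp_priv fields of struct
pp_memory_provider_params appear in the diff; the example_bind_rxq() wrapper,
the header locations, and the struct memory_provider_ops argument are
assumptions made for the sketch.

/* Hypothetical caller sketch -- not part of this patch. */
#include <linux/netdevice.h>
#include <net/page_pool/types.h>            /* struct pp_memory_provider_params (assumed location) */
#include <net/page_pool/memory_provider.h>  /* net_mp_open_rxq() (assumed location) */

static int example_bind_rxq(struct net_device *dev, unsigned int rxq_idx,
                            const struct memory_provider_ops *ops, void *priv)
{
        struct pp_memory_provider_params p = {
                .mp_ops  = ops,         /* provider callbacks (e.g. io_uring ZC) */
                .mp_priv = priv,        /* provider-private state */
        };

        /*
         * net_mp_open_rxq() takes the instance lock and calls
         * __net_mp_open_rxq(), which now rejects: an out-of-range queue
         * index, disabled tcp-data-split, a non-zero hds-thresh, an
         * attached XDP program, and a queue already claimed by another
         * memory provider or an AF_XDP pool -- then restarts the queue.
         */
        return net_mp_open_rxq(dev, rxq_idx, &p);
}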
Diffstat (limited to 'net/core/netdev_rx_queue.c')
-rw-r--r--  net/core/netdev_rx_queue.c | 49 ++++++++++++++++++++++++++++++++++++++-----------
1 file changed, 38 insertions(+), 11 deletions(-)
diff --git a/net/core/netdev_rx_queue.c b/net/core/netdev_rx_queue.c
index 3af716f77a13..556b5393ec9f 100644
--- a/net/core/netdev_rx_queue.c
+++ b/net/core/netdev_rx_queue.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>
#include <net/netdev_lock.h>
#include <net/netdev_queues.h>
@@ -86,8 +87,9 @@ err_free_new_mem:
}
EXPORT_SYMBOL_NS_GPL(netdev_rx_queue_restart, "NETDEV_INTERNAL");
-static int __net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
- struct pp_memory_provider_params *p)
+int __net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
+ const struct pp_memory_provider_params *p,
+ struct netlink_ext_ack *extack)
{
struct netdev_rx_queue *rxq;
int ret;
@@ -95,16 +97,41 @@ static int __net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
if (!netdev_need_ops_lock(dev))
return -EOPNOTSUPP;
- if (ifq_idx >= dev->real_num_rx_queues)
+ if (rxq_idx >= dev->real_num_rx_queues)
return -EINVAL;
- ifq_idx = array_index_nospec(ifq_idx, dev->real_num_rx_queues);
+ rxq_idx = array_index_nospec(rxq_idx, dev->real_num_rx_queues);
- rxq = __netif_get_rx_queue(dev, ifq_idx);
- if (rxq->mp_params.mp_ops)
+ if (rxq_idx >= dev->real_num_rx_queues) {
+ NL_SET_ERR_MSG(extack, "rx queue index out of range");
+ return -ERANGE;
+ }
+ if (dev->cfg->hds_config != ETHTOOL_TCP_DATA_SPLIT_ENABLED) {
+ NL_SET_ERR_MSG(extack, "tcp-data-split is disabled");
+ return -EINVAL;
+ }
+ if (dev->cfg->hds_thresh) {
+ NL_SET_ERR_MSG(extack, "hds-thresh is not zero");
+ return -EINVAL;
+ }
+ if (dev_xdp_prog_count(dev)) {
+ NL_SET_ERR_MSG(extack, "unable to custom memory provider to device with XDP program attached");
return -EEXIST;
+ }
+
+ rxq = __netif_get_rx_queue(dev, rxq_idx);
+ if (rxq->mp_params.mp_ops) {
+ NL_SET_ERR_MSG(extack, "designated queue already memory provider bound");
+ return -EEXIST;
+ }
+#ifdef CONFIG_XDP_SOCKETS
+ if (rxq->pool) {
+ NL_SET_ERR_MSG(extack, "designated queue already in use by AF_XDP");
+ return -EBUSY;
+ }
+#endif
rxq->mp_params = *p;
- ret = netdev_rx_queue_restart(dev, ifq_idx);
+ ret = netdev_rx_queue_restart(dev, rxq_idx);
if (ret) {
rxq->mp_params.mp_ops = NULL;
rxq->mp_params.mp_priv = NULL;
@@ -112,19 +139,19 @@ static int __net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
return ret;
}
-int net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx,
+int net_mp_open_rxq(struct net_device *dev, unsigned int rxq_idx,
struct pp_memory_provider_params *p)
{
int ret;
netdev_lock(dev);
- ret = __net_mp_open_rxq(dev, ifq_idx, p);
+ ret = __net_mp_open_rxq(dev, rxq_idx, p, NULL);
netdev_unlock(dev);
return ret;
}
-static void __net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx,
- struct pp_memory_provider_params *old_p)
+void __net_mp_close_rxq(struct net_device *dev, unsigned int ifq_idx,
+ const struct pp_memory_provider_params *old_p)
{
struct netdev_rx_queue *rxq;