author     Shay Drory <shayd@nvidia.com>  2023-10-12 12:27:40 -0700
committer  Saeed Mahameed <saeedm@nvidia.com>  2023-10-14 10:16:31 -0700
commit     b430c1b4f63be51dde175a1dd3addba65ca24e2b (patch)
tree       d0426edc7e49d6ecbe54efb8e03e217025571e53 /drivers/net/ethernet/mellanox/mlx5/core/dev.c
parent     e534552c92a44690e48593f9567fe689545ded73 (diff)
net/mlx5: Replace global mlx5_intf_lock with HCA devcom component lock
mlx5_intf_lock is used to synchronize LAG changes with auxiliary device add/remove on its slave mlx5 core devices, which means that every time a mlx5 core dev adds or removes aux devices, mlx5 takes this global lock, even if LAG functionality isn't supported on the core dev. This causes a bottleneck when probing VFs/SFs in parallel. Hence, replace mlx5_intf_lock with the HCA devcom component lock, or no lock if LAG functionality isn't supported.

Signed-off-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
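To illustrate the locking pattern this commit moves to, here is a minimal userspace C sketch (not mlx5 kernel code): it contrasts a single process-wide mutex with a per-HCA-group lock that is skipped when the device cannot take part in a LAG. All names below (struct hca_group, struct core_dev, attach_aux_devices_*) are hypothetical stand-ins for the kernel structures.

/*
 * Illustrative sketch only; the real change uses
 * mlx5_devcom_comp_lock()/mlx5_devcom_comp_unlock() on
 * dev->priv.hca_devcom_comp, as shown in the diff below.
 */
#include <pthread.h>
#include <stddef.h>

/* Old scheme: every core dev contends on one global lock. */
static pthread_mutex_t global_intf_lock = PTHREAD_MUTEX_INITIALIZER;

/* New scheme: devices sharing an HCA share one lock; others take none. */
struct hca_group {
	pthread_mutex_t lock;	/* stand-in for the devcom component lock */
};

struct core_dev {
	struct hca_group *group;	/* NULL when LAG is not supported */
};

static void attach_aux_devices_old(struct core_dev *dev)
{
	(void)dev;
	pthread_mutex_lock(&global_intf_lock);	/* serializes every probe */
	/* ... add auxiliary devices for dev ... */
	pthread_mutex_unlock(&global_intf_lock);
}

static void attach_aux_devices_new(struct core_dev *dev)
{
	if (dev->group)
		pthread_mutex_lock(&dev->group->lock);	/* only HCA peers contend */
	/* ... add auxiliary devices for dev ... */
	if (dev->group)
		pthread_mutex_unlock(&dev->group->lock);
}

With the old pattern, probing many VFs/SFs in parallel serializes on one mutex; with the new one, only functions of the same HCA contend, and a device without LAG support takes no lock at all, which removes the bottleneck described above.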
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/dev.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/dev.c  37
1 file changed, 10 insertions, 27 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 6e3a8c22881f..cf0477f53dc4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -38,8 +38,6 @@
#include "devlink.h"
#include "lag/lag.h"
-/* intf dev list mutex */
-static DEFINE_MUTEX(mlx5_intf_mutex);
static DEFINE_IDA(mlx5_adev_ida);
static bool is_eth_rep_supported(struct mlx5_core_dev *dev)
@@ -337,9 +335,9 @@ static void del_adev(struct auxiliary_device *adev)
void mlx5_dev_set_lightweight(struct mlx5_core_dev *dev)
{
- mutex_lock(&mlx5_intf_mutex);
+ mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
- mutex_unlock(&mlx5_intf_mutex);
+ mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
}
bool mlx5_dev_is_lightweight(struct mlx5_core_dev *dev)
@@ -355,7 +353,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
int ret = 0, i;
devl_assert_locked(priv_to_devlink(dev));
- mutex_lock(&mlx5_intf_mutex);
+ mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
priv->flags &= ~MLX5_PRIV_FLAGS_DETACH;
for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
if (!priv->adev[i]) {
@@ -400,7 +398,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
break;
}
}
- mutex_unlock(&mlx5_intf_mutex);
+ mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
return ret;
}
@@ -413,7 +411,7 @@ void mlx5_detach_device(struct mlx5_core_dev *dev, bool suspend)
int i;
devl_assert_locked(priv_to_devlink(dev));
- mutex_lock(&mlx5_intf_mutex);
+ mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
if (!priv->adev[i])
continue;
@@ -443,7 +441,7 @@ skip_suspend:
priv->adev[i] = NULL;
}
priv->flags |= MLX5_PRIV_FLAGS_DETACH;
- mutex_unlock(&mlx5_intf_mutex);
+ mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
}
int mlx5_register_device(struct mlx5_core_dev *dev)
@@ -451,10 +449,10 @@ int mlx5_register_device(struct mlx5_core_dev *dev)
int ret;
devl_assert_locked(priv_to_devlink(dev));
- mutex_lock(&mlx5_intf_mutex);
+ mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
ret = mlx5_rescan_drivers_locked(dev);
- mutex_unlock(&mlx5_intf_mutex);
+ mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
if (ret)
mlx5_unregister_device(dev);
@@ -464,10 +462,10 @@ int mlx5_register_device(struct mlx5_core_dev *dev)
void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
devl_assert_locked(priv_to_devlink(dev));
- mutex_lock(&mlx5_intf_mutex);
+ mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
dev->priv.flags = MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
mlx5_rescan_drivers_locked(dev);
- mutex_unlock(&mlx5_intf_mutex);
+ mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
}
static int add_drivers(struct mlx5_core_dev *dev)
@@ -545,7 +543,6 @@ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
- lockdep_assert_held(&mlx5_intf_mutex);
if (priv->flags & MLX5_PRIV_FLAGS_DETACH)
return 0;
@@ -565,17 +562,3 @@ bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev
return (fsystem_guid && psystem_guid && fsystem_guid == psystem_guid);
}
-
-void mlx5_dev_list_lock(void)
-{
- mutex_lock(&mlx5_intf_mutex);
-}
-void mlx5_dev_list_unlock(void)
-{
- mutex_unlock(&mlx5_intf_mutex);
-}
-
-int mlx5_dev_list_trylock(void)
-{
- return mutex_trylock(&mlx5_intf_mutex);
-}