author		Avihai Horon <avihaih@nvidia.com>	2023-04-10 16:07:50 +0300
committer	Leon Romanovsky <leon@kernel.org>	2023-04-16 13:29:07 +0300
commit		ed4b0661cce119870edb1994fd06c9cbc1dc05c3 (patch)
tree		ab41ab922044764255e2734898ec449e095f9c46
parent		8d7c7c0eeb74281c846ef9231ce20536c79a99b4 (diff)
RDMA/mlx5: Remove pcie_relaxed_ordering_enabled() check for RO write
The pcie_relaxed_ordering_enabled() check was added to avoid a syndrome when creating an MKey with relaxed ordering (RO) enabled while the driver's relaxed_ordering_{read,write} HCA capabilities are out of sync with FW.

While this can happen with relaxed_ordering_read, it can't happen with relaxed_ordering_write: that capability is set if the device supports RO write, regardless of RO in PCI config space, and thus can't change during runtime.

Therefore, drop the pcie_relaxed_ordering_enabled() check for relaxed_ordering_write while keeping it for relaxed_ordering_read. Doing so also allows the use of RO write in VFs and VMs (where RO in PCI config space is not reported/emulated properly).

Signed-off-by: Avihai Horon <avihaih@nvidia.com>
Reviewed-by: Shay Drory <shayd@nvidia.com>
Link: https://lore.kernel.org/r/7e8f55e31572c1702d69cae015a395d3a824a38a.1681131553.git.leon@kernel.org
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
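For reference, this is roughly the MKey relaxed-ordering setup that set_mkc_access_pd_addr_fields() is left with after the patch (condensed from the mr.c hunk below; the surrounding function and variables are assumed from that context):

	if (acc & IB_ACCESS_RELAXED_ORDERING) {
		/* RO write: gated only by the HCA capability, which reflects
		 * device support and cannot change at runtime. */
		if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
			MLX5_SET(mkc, mkc, relaxed_ordering_write, 1);
		/* RO read: still also requires PCIe relaxed ordering to be
		 * enabled, since this capability can be out of sync with FW. */
		if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) &&
		    pcie_relaxed_ordering_enabled(dev->mdev->pdev))
			MLX5_SET(mkc, mkc, relaxed_ordering_read, 1);
	}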
-rw-r--r--	drivers/infiniband/hw/mlx5/mr.c				6
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en/params.c	3
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_common.c	2
3 files changed, 5 insertions, 6 deletions
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 67356f515261..bd0a818ba1cd 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -67,11 +67,11 @@ static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
MLX5_SET(mkc, mkc, lr, 1);
- if ((acc & IB_ACCESS_RELAXED_ORDERING) &&
- pcie_relaxed_ordering_enabled(dev->mdev->pdev)) {
+ if (acc & IB_ACCESS_RELAXED_ORDERING) {
if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
MLX5_SET(mkc, mkc, relaxed_ordering_write, 1);
- if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) &&
+ pcie_relaxed_ordering_enabled(dev->mdev->pdev))
MLX5_SET(mkc, mkc, relaxed_ordering_read, 1);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index a21bd1179477..d840a59aec88 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -867,8 +867,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
bool lro_en = params->packet_merge.type == MLX5E_PACKET_MERGE_LRO;
- bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) &&
- MLX5_CAP_GEN(mdev, relaxed_ordering_write);
+ bool ro = MLX5_CAP_GEN(mdev, relaxed_ordering_write);
return ro && lro_en ?
MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 4c9a3210600c..993af4c12d90 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -44,7 +44,7 @@ void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc)
bool ro_read = MLX5_CAP_GEN(mdev, relaxed_ordering_read);
MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_pci_enable && ro_read);
- MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_pci_enable && ro_write);
+ MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_write);
}
int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey)