author     Aharon Landau <aharonl@nvidia.com>      2022-02-15 19:55:31 +0200
committer  Jason Gunthorpe <jgg@nvidia.com>        2022-02-23 14:59:13 -0400
commit     56561ac6b27d489feb5d1e7e8b2a55a15063fcad (patch)
tree       2339c32605555f558de47c0b3d6ed794f96272aa /drivers/infiniband/hw/mlx5/odp.c
parent     2f0e60d5e9f96341a0c8a01be8878cdb3b29ff20 (diff)
RDMA/mlx5: Merge similar flows of allocating MR from the cache
When allocating an MR from the cache, the driver calls get_cache_mr() and, on failure, retries with create_cache_mr(). This is the flow of mlx5_mr_cache_alloc(), so use it instead.

Link: https://lore.kernel.org/r/53c85fcd4de6ec9de0b8e6cbb1bf5d5fe19900c3.1644947594.git.leonro@nvidia.com
Signed-off-by: Aharon Landau <aharonl@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
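For context, the open-coded pattern this patch removes looked roughly like the sketch below. The helper names come from the commit message; the signatures and error conventions are simplified assumptions, not the exact driver code:

	/*
	 * Sketch of the duplicated allocation flow: try to pop an MR from
	 * the cache bucket, and fall back to creating a fresh one on a miss.
	 * mlx5_mr_cache_alloc() performs this same sequence internally, so
	 * call sites can invoke it directly instead of open-coding the retry.
	 */
	mr = get_cache_mr(ent);			/* fast path: reuse a cached MR */
	if (!mr) {
		mr = create_cache_mr(ent);	/* slow path: build a new MR */
		if (IS_ERR(mr))
			return mr;
	}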
Diffstat (limited to 'drivers/infiniband/hw/mlx5/odp.c')
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c | 11
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 86842cd580ba..f834c9590c51 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -407,6 +407,7 @@ static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
 static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
 						unsigned long idx)
 {
+	struct mlx5_ib_dev *dev = mr_to_mdev(imr);
 	struct ib_umem_odp *odp;
 	struct mlx5_ib_mr *mr;
 	struct mlx5_ib_mr *ret;
@@ -418,13 +419,14 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
 	if (IS_ERR(odp))
 		return ERR_CAST(odp);
 
-	mr = mlx5_mr_cache_alloc(
-		mr_to_mdev(imr), MLX5_IMR_MTT_CACHE_ENTRY, imr->access_flags);
+	mr = mlx5_mr_cache_alloc(dev, &dev->cache.ent[MLX5_IMR_MTT_CACHE_ENTRY],
+				 imr->access_flags);
 	if (IS_ERR(mr)) {
 		ib_umem_odp_release(odp);
 		return mr;
 	}
 
+	mr->access_flags = imr->access_flags;
 	mr->ibmr.pd = imr->ibmr.pd;
 	mr->ibmr.device = &mr_to_mdev(imr)->ib_dev;
 	mr->umem = &odp->umem;
@@ -493,12 +495,15 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
 	if (IS_ERR(umem_odp))
 		return ERR_CAST(umem_odp);
 
-	imr = mlx5_mr_cache_alloc(dev, MLX5_IMR_KSM_CACHE_ENTRY, access_flags);
+	imr = mlx5_mr_cache_alloc(dev,
+				  &dev->cache.ent[MLX5_IMR_KSM_CACHE_ENTRY],
+				  access_flags);
 	if (IS_ERR(imr)) {
 		ib_umem_odp_release(umem_odp);
 		return imr;
 	}
 
+	imr->access_flags = access_flags;
 	imr->ibmr.pd = &pd->ibpd;
 	imr->ibmr.iova = 0;
 	imr->umem = &umem_odp->umem;
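Both ODP call sites now go through the same entry-based interface. A minimal usage sketch, using only identifiers visible in the hunks above:

	struct mlx5_ib_mr *mr;

	/* Allocate from the implicit-MR MTT bucket; on a cache miss the
	 * helper creates a new MR rather than failing outright.
	 */
	mr = mlx5_mr_cache_alloc(dev, &dev->cache.ent[MLX5_IMR_MTT_CACHE_ENTRY],
				 imr->access_flags);
	if (IS_ERR(mr))
		return mr;

	/* As the hunks above show, call sites now record the access flags
	 * on the MR themselves after allocation.
	 */
	mr->access_flags = imr->access_flags;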