path: root/drivers/infiniband/hw/mlx5/mr.c
author    Jason Gunthorpe <jgg@mellanox.com>    2020-03-10 10:22:35 +0200
committer Jason Gunthorpe <jgg@mellanox.com>    2020-03-13 11:08:01 -0300
commit    ad2d3ef46d2a88f2906d8d0cc6b912199ec3f1d6 (patch)
tree      e340c15843e1aa47281a4eebe0b2fa31a3781f62 /drivers/infiniband/hw/mlx5/mr.c
parent    a1d8854aae4ee19df6161a276a99d3c9c2abc4f3 (diff)
download  linux-ad2d3ef46d2a88f2906d8d0cc6b912199ec3f1d6.tar.gz
          linux-ad2d3ef46d2a88f2906d8d0cc6b912199ec3f1d6.tar.bz2
          linux-ad2d3ef46d2a88f2906d8d0cc6b912199ec3f1d6.zip
RDMA/mlx5: Lock access to ent->available_mrs/limit when doing queue_work
Accesses to these members need to be locked. There is no reason not to
hold a spinlock while calling queue_work(), so move the tests into a
helper and always call it under lock. The helper should be called when
available_mrs is adjusted.

Link: https://lore.kernel.org/r/20200310082238.239865-10-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
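The pattern being applied is general: adjust the counter and make the
queue_work() decision inside the same critical section, so the test never
acts on a stale value. Below is a minimal kernel-C sketch of that pattern;
all demo_* identifiers are hypothetical illustrations, not names from the
patch, and only the locking/hysteresis shape mirrors mr.c.

/* Sketch of "test and queue_work() under the same lock". */
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_bucket {
	spinlock_t lock;
	unsigned int available;		/* analogous to ent->available_mrs */
	unsigned int limit;		/* analogous to ent->limit */
	struct workqueue_struct *wq;
	struct work_struct work;
};

/*
 * Caller must hold bucket->lock: the water-mark test and queue_work()
 * then happen atomically with respect to concurrent adjusters.
 */
static void demo_adjust_locked(struct demo_bucket *b)
{
	lockdep_assert_held(&b->lock);

	/* Hysteresis: refill below the low mark, shrink above twice it. */
	if (b->available < b->limit || b->available > 2 * b->limit)
		queue_work(b->wq, &b->work);
}

static void demo_take_one(struct demo_bucket *b)
{
	spin_lock_irq(&b->lock);
	b->available--;
	demo_adjust_locked(b);	/* decide while the count is still stable */
	spin_unlock_irq(&b->lock);
}

With limit = 8, for example, nothing is queued while available stays in
[8, 16]; a refill is scheduled once it drops to 7 and a shrink once it
reaches 17.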
Diffstat (limited to 'drivers/infiniband/hw/mlx5/mr.c')
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 40
1 file changed, 25 insertions(+), 15 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 091e24c58e2c..b46039d86b98 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -134,6 +134,10 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context)
list_add_tail(&mr->list, &ent->head);
ent->available_mrs++;
ent->total_mrs++;
+ /*
+ * Creating is always done in response to some demand, so do not call
+ * queue_adjust_cache_locked().
+ */
spin_unlock_irqrestore(&ent->lock, flags);
if (!completion_done(&ent->compl))
@@ -367,6 +371,20 @@ static int someone_adding(struct mlx5_mr_cache *cache)
return 0;
}
+/*
+ * Check if the bucket is outside the high/low water mark and schedule an async
+ * update. The cache refill has hysteresis, once the low water mark is hit it is
+ * refilled up to the high mark.
+ */
+static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
+{
+ lockdep_assert_held(&ent->lock);
+
+ if (ent->available_mrs < ent->limit ||
+ ent->available_mrs > 2 * ent->limit)
+ queue_work(ent->dev->cache.wq, &ent->work);
+}
+
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
struct mlx5_ib_dev *dev = ent->dev;
@@ -462,9 +480,8 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
list);
list_del(&mr->list);
ent->available_mrs--;
+ queue_adjust_cache_locked(ent);
spin_unlock_irq(&ent->lock);
- if (ent->available_mrs < ent->limit)
- queue_work(cache->wq, &ent->work);
return mr;
}
}
@@ -487,14 +504,12 @@ static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_cache_ent *req_ent)
list);
list_del(&mr->list);
ent->available_mrs--;
+ queue_adjust_cache_locked(ent);
spin_unlock_irq(&ent->lock);
- if (ent->available_mrs < ent->limit)
- queue_work(dev->cache.wq, &ent->work);
break;
}
+ queue_adjust_cache_locked(ent);
spin_unlock_irq(&ent->lock);
-
- queue_work(dev->cache.wq, &ent->work);
}
if (!mr)
@@ -516,7 +531,6 @@ static void detach_mr_from_cache(struct mlx5_ib_mr *mr)
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
struct mlx5_cache_ent *ent = mr->cache_ent;
- int shrink = 0;
if (!ent)
return;
@@ -524,20 +538,14 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
if (mlx5_mr_cache_invalidate(mr)) {
detach_mr_from_cache(mr);
destroy_mkey(dev, mr);
- if (ent->available_mrs < ent->limit)
- queue_work(dev->cache.wq, &ent->work);
return;
}
spin_lock_irq(&ent->lock);
list_add_tail(&mr->list, &ent->head);
ent->available_mrs++;
- if (ent->available_mrs > 2 * ent->limit)
- shrink = 1;
+ queue_adjust_cache_locked(ent);
spin_unlock_irq(&ent->lock);
-
- if (shrink)
- queue_work(dev->cache.wq, &ent->work);
}
static void clean_keys(struct mlx5_ib_dev *dev, int c)
@@ -653,7 +661,9 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
ent->limit = dev->mdev->profile->mr_cache[i].limit;
else
ent->limit = 0;
- queue_work(cache->wq, &ent->work);
+ spin_lock_irq(&ent->lock);
+ queue_adjust_cache_locked(ent);
+ spin_unlock_irq(&ent->lock);
}
mlx5_mr_cache_debugfs_init(dev);
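The init-path hunk above follows the same rule: rather than unconditionally
queueing work for every bucket as before, the entry lock is taken and the
helper decides. A sketch of that shape, reusing the hypothetical demo_bucket
from earlier:

static void demo_init_one(struct demo_bucket *b)
{
	/*
	 * Even at init time, make the queue decision under the lock; work
	 * is scheduled only if the bucket is outside its water marks.
	 */
	spin_lock_irq(&b->lock);
	demo_adjust_locked(b);
	spin_unlock_irq(&b->lock);
}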