author		Jason Gunthorpe <jgg@mellanox.com>	2018-09-16 20:48:09 +0300
committer	Doug Ledford <dledford@redhat.com>	2018-09-21 11:58:36 -0400
commit		ca748c39ea3f3c755295d64d69ba0b4375e34b5d (patch)
tree		64bbe9e8457d512169ead7e8fa43bff77958eab0 /include/rdma
parent		f27a0d50a4bc2861b472c2e3740d63a29d1ac460 (diff)
RDMA/umem: Get rid of per_mm->notifier_count
This is intrinsically racy and the scheme is simply unnecessary. New MR
registration can wait for any ongoing invalidation to fully complete:

       CPU0                              CPU1
                                  if (atomic_read())
 if (atomic_dec_and_test() &&
     !list_empty())
  { /* not taken */ }
                                  list_add()

This puts the new UMEM into a kind of purgatory until another
invalidate rolls through.

Instead, hold the read side of the umem_rwsem across the paired
start/end and get rid of the racy 'deferred add' approach.

Since all umems in the rbt are always ready to go, also get rid of the
mn_counters_active stuff.

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
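[Editor's note: a hedged sketch of the scheme the patch moves to. This is
illustrative only, not the actual kernel implementation; the function names
and simplified callback signatures are assumptions. The invalidation side
pins the read half of umem_rwsem for the whole start/end pair, while
registration takes the write half to insert into the tree:

/* Illustrative sketch only -- not the actual kernel code. The notifier
 * start callback takes the read side of umem_rwsem and the paired end
 * callback releases it, so MR registration (which takes the write side
 * to insert into the interval tree) simply waits out any in-flight
 * invalidation instead of deferring the add.
 */
static int per_mm_range_start(struct mmu_notifier *mn, struct mm_struct *mm,
			      unsigned long start, unsigned long end)
{
	struct ib_ucontext_per_mm *per_mm =
		container_of(mn, struct ib_ucontext_per_mm, mn);

	down_read(&per_mm->umem_rwsem);	/* held until the paired end */
	/* ... bump notifiers_count and invalidate overlapping umems ... */
	return 0;
}

static void per_mm_range_end(struct mmu_notifier *mn, struct mm_struct *mm,
			     unsigned long start, unsigned long end)
{
	struct ib_ucontext_per_mm *per_mm =
		container_of(mn, struct ib_ucontext_per_mm, mn);

	/* ... drop notifiers_count and bump notifiers_seq on the umems ... */
	up_read(&per_mm->umem_rwsem);	/* pairs with per_mm_range_start() */
}

With the lock held across the pair, every umem reachable through the
interval tree is always fully accounted, which is why the per-umem
mn_counters_active flag and the no_private_counters list removed below
are no longer needed.]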
Diffstat (limited to 'include/rdma')
-rw-r--r--	include/rdma/ib_umem_odp.h	15
1 file changed, 0 insertions(+), 15 deletions(-)
diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h
index 259eb08dfc9e..ce9502545903 100644
--- a/include/rdma/ib_umem_odp.h
+++ b/include/rdma/ib_umem_odp.h
@@ -67,15 +67,9 @@ struct ib_umem_odp {
 	struct mutex		umem_mutex;
 	void			*private; /* for the HW driver to use. */
 
-	/* When false, use the notifier counter in the ucontext struct. */
-	bool	mn_counters_active;
 	int	notifiers_seq;
 	int	notifiers_count;
 
-	/* A linked list of umems that don't have private mmu notifier
-	 * counters yet. */
-	struct list_head no_private_counters;
-
 	/* Tree tracking */
 	struct umem_odp_node	interval_tree;
 
@@ -99,11 +93,8 @@ struct ib_ucontext_per_mm {
 	struct rb_root_cached umem_tree;
 	/* Protects umem_tree */
 	struct rw_semaphore umem_rwsem;
-	atomic_t notifier_count;
 
 	struct mmu_notifier mn;
-	/* A list of umems that don't have private mmu notifier counters yet. */
-	struct list_head no_private_counters;
 	unsigned int odp_mrs_count;
 
 	struct list_head ucontext_list;
@@ -162,12 +153,6 @@ static inline int ib_umem_mmu_notifier_retry(struct ib_umem_odp *umem_odp,
 	 * and the ucontext umem_mutex semaphore locked for read).
 	 */
 
-	/* Do not allow page faults while the new ib_umem hasn't seen a state
-	 * with zero notifiers yet, and doesn't have its own valid set of
-	 * private counters. */
-	if (!umem_odp->mn_counters_active)
-		return 1;
-
 	if (unlikely(umem_odp->notifiers_count))
 		return 1;
 	if (umem_odp->notifiers_seq != mmu_seq)
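[Editor's note: for context on the surviving check, a driver page-fault
path typically uses ib_umem_mmu_notifier_retry() in a KVM-style
sample/fault/recheck loop. The sketch below is an assumption about usage,
not code from this patch; drv_fault_pages() and drv_install_pages() are
hypothetical placeholders:

/* Hedged sketch of a driver ODP fault path. The sequence number is
 * sampled before faulting pages; if an invalidation ran in between,
 * the result is discarded and the fault is retried rather than
 * installing stale translations.
 */
static int drv_handle_odp_fault(struct ib_umem_odp *umem_odp, u64 addr)
{
	unsigned long mmu_seq;
	int ret;

again:
	mmu_seq = umem_odp->notifiers_seq;	/* sample before faulting */

	ret = drv_fault_pages(umem_odp, addr);	/* may sleep, no locks held */
	if (ret)
		return ret;

	mutex_lock(&umem_odp->umem_mutex);
	if (ib_umem_mmu_notifier_retry(umem_odp, mmu_seq)) {
		/* An invalidation raced with us; start over. */
		mutex_unlock(&umem_odp->umem_mutex);
		goto again;
	}
	drv_install_pages(umem_odp, addr);	/* publish the translation */
	mutex_unlock(&umem_odp->umem_mutex);
	return 0;
}]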