author    | Christian König <christian.koenig@amd.com> | 2017-09-05 17:30:46 +0200
committer | Alex Deucher <alexander.deucher@amd.com> | 2017-09-12 14:27:56 -0400
commit    | 1ed3d2567c800eca053ef86fdd3fc27b72d0192e (patch)
tree      | e08cb35bf5b9e8ddabe4eb601d108890341b7467 /drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
parent    | aebc5e6f50f770ec9392c3ca804f18b30797dfa7 (diff)
drm/amdgpu: keep the MMU lock until the update ends v4
This is quite controversial because it adds another lock which is held during
page table updates, but I don't see many other options.
v2: allow multiple updates to be in flight at the same time
v3: simplify the patch, take the read side only once
v4: correctly fix rebase conflict
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
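
The locking scheme the message describes: the notifier takes the read side of the
rw_semaphore in invalidate_range_start and only releases it in invalidate_range_end,
while a mutex plus an atomic recursion counter ensure that nested or overlapping
start/end pairs take the semaphore exactly once (the v2/v3 changes). A minimal
userspace sketch of that pattern, assuming C11 atomics and pthreads; struct mn_lock,
mn_read_lock and mn_read_unlock are hypothetical names that mirror the patch's new
read_lock and recursion fields, not amdgpu code:

/* Sketch only: pthread_rwlock_t stands in for the kernel rw_semaphore. */
#include <pthread.h>
#include <stdatomic.h>

struct mn_lock {
	pthread_rwlock_t lock;      /* rw_semaphore analogue */
	pthread_mutex_t  read_lock; /* serializes read-side acquisition */
	atomic_int       recursion; /* open invalidate_range_start/end pairs */
};

static struct mn_lock g_lock = {
	.lock      = PTHREAD_RWLOCK_INITIALIZER,
	.read_lock = PTHREAD_MUTEX_INITIALIZER,
	.recursion = 0,
};

/* The first start callback takes the read side; nested ones only bump
 * the counter, so the read side is taken exactly once. */
static void mn_read_lock(struct mn_lock *l)
{
	pthread_mutex_lock(&l->read_lock);
	if (atomic_fetch_add(&l->recursion, 1) == 0) /* atomic_inc_return() == 1 */
		pthread_rwlock_rdlock(&l->lock);
	pthread_mutex_unlock(&l->read_lock);
}

/* The last end callback drops the read side again. The kernel uses
 * down_read_non_owner()/up_read_non_owner() because acquire and release
 * happen in different notifier callbacks; this sketch glosses over that. */
static void mn_read_unlock(struct mn_lock *l)
{
	if (atomic_fetch_sub(&l->recursion, 1) == 1) /* atomic_dec_return() == 0 */
		pthread_rwlock_unlock(&l->lock);
}

The same shape appears verbatim in amdgpu_mn_read_lock()/amdgpu_mn_read_unlock()
in the diff below.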
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 59
1 file changed, 55 insertions, 4 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 99edb40b5f99..521a51b37f5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -52,6 +52,8 @@ struct amdgpu_mn {
 	/* objects protected by lock */
 	struct rw_semaphore lock;
 	struct rb_root objects;
+	struct mutex read_lock;
+	atomic_t recursion;
 };
 
 struct amdgpu_mn_node {
@@ -126,6 +128,34 @@ void amdgpu_mn_unlock(struct amdgpu_mn *mn)
 }
 
 /**
+ * amdgpu_mn_read_lock - take the rmn read lock
+ *
+ * @rmn: our notifier
+ *
+ * Take the rmn read side lock.
+ */
+static void amdgpu_mn_read_lock(struct amdgpu_mn *rmn)
+{
+	mutex_lock(&rmn->read_lock);
+	if (atomic_inc_return(&rmn->recursion) == 1)
+		down_read_non_owner(&rmn->lock);
+	mutex_unlock(&rmn->read_lock);
+}
+
+/**
+ * amdgpu_mn_read_unlock - drop the rmn read lock
+ *
+ * @rmn: our notifier
+ *
+ * Drop the rmn read side lock.
+ */
+static void amdgpu_mn_read_unlock(struct amdgpu_mn *rmn)
+{
+	if (atomic_dec_return(&rmn->recursion) == 0)
+		up_read_non_owner(&rmn->lock);
+}
+
+/**
  * amdgpu_mn_invalidate_node - unmap all BOs of a node
  *
  * @node: the node with the BOs to unmap
@@ -171,7 +201,7 @@ static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn,
 	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
 	struct interval_tree_node *it;
 
-	down_read(&rmn->lock);
+	amdgpu_mn_read_lock(rmn);
 
 	it = interval_tree_iter_first(&rmn->objects, address, address);
 	if (it) {
@@ -181,7 +211,7 @@ static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn,
 		amdgpu_mn_invalidate_node(node, address, address);
 	}
 
-	up_read(&rmn->lock);
+	amdgpu_mn_read_unlock(rmn);
 }
 
 /**
@@ -206,7 +236,7 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
 	/* notification is exclusive, but interval is inclusive */
 	end -= 1;
 
-	down_read(&rmn->lock);
+	amdgpu_mn_read_lock(rmn);
 
 	it = interval_tree_iter_first(&rmn->objects, start, end);
 	while (it) {
@@ -217,14 +247,33 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
 
 		amdgpu_mn_invalidate_node(node, start, end);
 	}
+}
+
+/**
+ * amdgpu_mn_invalidate_range_end - callback to notify about mm change
+ *
+ * @mn: our notifier
+ * @mm: the mm this callback is about
+ * @start: start of updated range
+ * @end: end of updated range
+ *
+ * Release the lock again to allow new command submissions.
+ */
+static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
+					   struct mm_struct *mm,
+					   unsigned long start,
+					   unsigned long end)
+{
+	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
 
-	up_read(&rmn->lock);
+	amdgpu_mn_read_unlock(rmn);
 }
 
 static const struct mmu_notifier_ops amdgpu_mn_ops = {
 	.release = amdgpu_mn_release,
 	.invalidate_page = amdgpu_mn_invalidate_page,
 	.invalidate_range_start = amdgpu_mn_invalidate_range_start,
+	.invalidate_range_end = amdgpu_mn_invalidate_range_end,
 };
 
 /**
@@ -261,6 +310,8 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
 	rmn->mn.ops = &amdgpu_mn_ops;
 	init_rwsem(&rmn->lock);
 	rmn->objects = RB_ROOT;
+	mutex_init(&rmn->read_lock);
+	atomic_set(&rmn->recursion, 0);
 
 	r = __mmu_notifier_register(&rmn->mn, mm);
 	if (r)
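
For the other side of the semaphore: command submission takes the rw_semaphore
exclusively through amdgpu_mn_lock()/amdgpu_mn_unlock(), which appear in the hunk
context above. Continuing the hypothetical userspace analogue from earlier (a
sketch, not amdgpu code), the writer side looks like this:

/* Writer-side sketch: a submission blocks for as long as any
 * invalidate_range_start..invalidate_range_end window is open, and new
 * invalidations block while a submission holds the write side. */
static void cs_lock(struct mn_lock *l)   /* amdgpu_mn_lock() analogue */
{
	pthread_rwlock_wrlock(&l->lock);
}

static void cs_unlock(struct mn_lock *l) /* amdgpu_mn_unlock() analogue */
{
	pthread_rwlock_unlock(&l->lock);
}

Keeping the read side held across the whole start..end window is the "keep the MMU
lock until the update ends" behavior of the subject line: no new command submission
can proceed while a page table update triggered by an invalidation is still in flight.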