path: root/mm/mmu_notifier.c
author		Daniel Vetter <daniel.vetter@ffwll.ch>	2019-08-26 22:14:24 +0200
committer	Jason Gunthorpe <jgg@mellanox.com>	2019-09-07 04:28:05 -0300
commit		ba170f76b69d1d45a60eaa9ec920c8fddd4c16f3 (patch)
tree		f40356848e40317b54de13ab58bf14afc7489853 /mm/mmu_notifier.c
parent		312364f3534cc974b79a96d062bde2386315201f (diff)
mm, notifier: Catch sleeping/blocking for !blockable
We need to make sure implementations don't cheat and don't have a possible schedule/blocking point deeply buried where review can't catch it.

I'm not sure whether this is the best way to make sure all the might_sleep() callsites trigger, and it's a bit ugly in the code flow. But it gets the job done.

Inspired by an i915 patch series which did exactly that, because the rules haven't been entirely clear to us.

Link: https://lore.kernel.org/r/20190826201425.17547-5-daniel.vetter@ffwll.ch
Reviewed-by: Christian König <christian.koenig@amd.com> (v1)
Reviewed-by: Jérôme Glisse <jglisse@redhat.com> (v4)
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
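[Editor's note: for reference, non_block_start()/non_block_end() come from the parent commit of this series (312364f3534c, "kernel.h: Add non_block_start/end()"). Roughly, they flag the current task so that might_sleep() warns instead of silently allowing a sleep. A simplified sketch of that mechanism, not the verbatim kernel code:

/*
 * Simplified sketch, assuming the parent commit's definitions in
 * include/linux/kernel.h, guarded by CONFIG_DEBUG_ATOMIC_SLEEP.
 */
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
/* Open a section in which any attempt to sleep should warn. */
# define non_block_start()	(current->non_block_count++)
/* Close the section; WARN on underflow, i.e. unbalanced annotations. */
# define non_block_end()	(WARN_ON(current->non_block_count-- == 0))
#else
/* Both annotations compile away entirely in production builds. */
# define non_block_start()	do { } while (0)
# define non_block_end()	do { } while (0)
#endif

might_sleep() then warns whenever it is reached with current->non_block_count != 0, which is exactly what catches a blocking call buried inside a !blockable notifier callback.]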
Diffstat (limited to 'mm/mmu_notifier.c')
-rw-r--r--	mm/mmu_notifier.c	15
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 3f39fb1402db..7fde88695f35 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -168,7 +168,13 @@ int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
 	id = srcu_read_lock(&srcu);
 	hlist_for_each_entry_rcu(mn, &range->mm->mmu_notifier_mm->list, hlist) {
 		if (mn->ops->invalidate_range_start) {
-			int _ret = mn->ops->invalidate_range_start(mn, range);
+			int _ret;
+
+			if (!mmu_notifier_range_blockable(range))
+				non_block_start();
+			_ret = mn->ops->invalidate_range_start(mn, range);
+			if (!mmu_notifier_range_blockable(range))
+				non_block_end();
 			if (_ret) {
 				pr_info("%pS callback failed with %d in %sblockable context.\n",
 					mn->ops->invalidate_range_start, _ret,
@@ -210,8 +216,13 @@ void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
 			mn->ops->invalidate_range(mn, range->mm,
 						  range->start,
 						  range->end);
-		if (mn->ops->invalidate_range_end)
+		if (mn->ops->invalidate_range_end) {
+			if (!mmu_notifier_range_blockable(range))
+				non_block_start();
 			mn->ops->invalidate_range_end(mn, range);
+			if (!mmu_notifier_range_blockable(range))
+				non_block_end();
+		}
 	}
 	srcu_read_unlock(&srcu, id);
 	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
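
[Editor's note: to illustrate what the annotation catches, here is a hypothetical notifier implementation (made-up driver, not from this patch) that takes a sleeping lock unconditionally. With the range marked non-blockable, mutex_lock() hits might_sleep() inside the non_block_start()/non_block_end() window and produces a backtrace:

/* Hypothetical callback; 'struct my_driver' and its lock are invented. */
static int bad_invalidate_range_start(struct mmu_notifier *mn,
				      const struct mmu_notifier_range *range)
{
	struct my_driver *drv = container_of(mn, struct my_driver, notifier);

	mutex_lock(&drv->lock);		/* sleeps: splats when !blockable */
	/* ... invalidate device mappings for [range->start, range->end) ... */
	mutex_unlock(&drv->lock);
	return 0;
}

A conforming implementation checks mmu_notifier_range_blockable(range) and, when blocking is not allowed, returns -EAGAIN without sleeping (e.g. by using mutex_trylock() instead).]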