author	Dave Hansen <dave.hansen@linux.intel.com>	2014-12-12 16:58:19 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-13 12:42:52 -0800
commit	d3c97900b427b8d5a476fdfe484267f09df418d6 (patch)
tree	0169510392ae190dcec4376a7341efe955fa27fd /ipc
parent	0050ee059f7fc86b1df2527aaa14ed5dc72f9973 (diff)
ipc/shm.c: fix overly aggressive shmdt() when calls span multiple segments
This is a highly-contrived scenario. But, a single shmdt() call can be
induced into unmapping memory from multiple shm segments. Example code
is here:

	http://www.sr71.net/~dave/intel/shmfun.c

The fix is pretty simple: Record the 'struct file' for the first VMA we
encounter and then stick to it. Decline to unmap anything not from the
same file and thus the same segment.

I found this by inspection and the odds of anyone hitting this in
practice are pretty darn small.

Lightly tested, but it's a pretty small patch.

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Manfred Spraul <manfred@colorfullife.com>
Reviewed-by: Davidlohr Bueso <dave@stgolabs.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
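The shmfun.c link above is the author's actual reproducer; what follows is only
a rough, minimal sketch of the same trick for illustration (the two-page segment
sizes, the variable names, and the complete lack of error handling are
assumptions of this sketch, not taken from shmfun.c). mremap(MREMAP_FIXED)
moves a page of segment B to an address inside segment A's attach range where
its page offset happens to match, which is exactly the case the old
pgoff-only check in shmdt() could not tell apart:

#define _GNU_SOURCE
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);

	/* Two unrelated two-page SysV shm segments. */
	int id_a = shmget(IPC_PRIVATE, 2 * page, IPC_CREAT | 0600);
	int id_b = shmget(IPC_PRIVATE, 2 * page, IPC_CREAT | 0600);
	char *a = shmat(id_a, NULL, 0);
	char *b = shmat(id_b, NULL, 0);

	/*
	 * Move the second page of segment B on top of the second page of
	 * segment A.  The moved vma keeps vm_pgoff == 1, which equals its
	 * page distance from 'a', so it passes shmdt()'s offset check.
	 */
	mremap(b + page, page, page, MREMAP_MAYMOVE | MREMAP_FIXED, a + page);

	/*
	 * Without this patch, the single call below also unmaps the page
	 * borrowed from segment B; with the vm_file check it only tears
	 * down the vma that is still backed by segment A.
	 */
	shmdt(a);

	shmctl(id_a, IPC_RMID, NULL);
	shmctl(id_b, IPC_RMID, NULL);
	return 0;
}

Whether a + page is still mapped after the shmdt() call is what distinguishes
a patched kernel from an unpatched one.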
Diffstat (limited to 'ipc')
-rw-r--r--  ipc/shm.c | 18
1 file changed, 13 insertions(+), 5 deletions(-)
diff --git a/ipc/shm.c b/ipc/shm.c
index 01454796ba3c..e5dc7d246f05 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -1229,6 +1229,7 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
 	int retval = -EINVAL;
 #ifdef CONFIG_MMU
 	loff_t size = 0;
+	struct file *file;
 	struct vm_area_struct *next;
 #endif
 
@@ -1245,7 +1246,8 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
 	 *   started at address shmaddr. It records it's size and then unmaps
 	 *   it.
 	 * - Then it unmaps all shm vmas that started at shmaddr and that
-	 *   are within the initially determined size.
+	 *   are within the initially determined size and that are from the
+	 *   same shm segment from which we determined the size.
 	 * Errors from do_munmap are ignored: the function only fails if
 	 * it's called with invalid parameters or if it's called to unmap
 	 * a part of a vma. Both calls in this function are for full vmas,
@@ -1271,8 +1273,14 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
 		 */
 		if ((vma->vm_ops == &shm_vm_ops) &&
 			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
-
-			size = file_inode(vma->vm_file)->i_size;
+			/*
+			 * Record the file of the shm segment being
+			 * unmapped.  With mremap(), someone could place
+			 * page from another segment but with equal offsets
+			 * in the range we are unmapping.
+			 */
+			file = vma->vm_file;
+			size = file_inode(file)->i_size;
 			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
 			/*
 			 * We discovered the size of the shm segment, so
@@ -1298,8 +1306,8 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
 
 		/* finding a matching vma now does not alter retval */
 		if ((vma->vm_ops == &shm_vm_ops) &&
-			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)
-
+		    ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
+		    (vma->vm_file == file))
 			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
 		vma = next;
 	}