author     Matt Helsley <matthltc@us.ibm.com>              2008-04-29 01:01:36 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-04-29 08:06:17 -0700
commit     925d1c401fa6cfd0df5d2e37da8981494ccdec07 (patch)
tree       4f3b7a09311cd99783b822350628125e44f9902d /mm
parent     e93b4ea20adb20f1f1f07f10ba5d7dd739d2843e (diff)
procfs task exe symlink
The kernel implements readlink of /proc/pid/exe by getting the file from
the first executable VMA. Then the path to the file is reconstructed and
reported as the result.
Because of the VMA walk, the code is slightly different on nommu systems.
This patch avoids separate /proc/pid/exe code on nommu systems. Instead of
walking the VMAs to find the first executable file-backed VMA, we store a
reference to the exec'd file in the mm_struct.
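A rough sketch of stashing that reference at exec time (the helper and the
exe_file/num_exe_file_vmas fields live outside the mm/ hunks shown below,
so the names here are illustrative):

        void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
        {
                if (new_exe_file)
                        get_file(new_exe_file);  /* take our own reference */
                if (mm->exe_file)
                        fput(mm->exe_file);      /* drop any previous exe file */
                mm->exe_file = new_exe_file;
                mm->num_exe_file_vmas = 0;       /* no VM_EXECUTABLE VMAs mapped yet */
        }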
That reference would prevent the filesystem holding the executable file
from being unmounted even after unmapping the VMAs. So we track the number
of VM_EXECUTABLE VMAs and drop the new reference when the last one is
unmapped. This avoids pinning the mounted filesystem.
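The counting helpers the hunks below call, added_exe_file_vma() and
removed_exe_file_vma(), are likewise defined outside the mm/ diff; a minimal
sketch of the scheme, assuming the same exe_file/num_exe_file_vmas fields
and that callers serialize these updates (e.g. under mmap_sem), is:

        void added_exe_file_vma(struct mm_struct *mm)
        {
                mm->num_exe_file_vmas++;         /* one more VM_EXECUTABLE mapping */
        }

        void removed_exe_file_vma(struct mm_struct *mm)
        {
                mm->num_exe_file_vmas--;
                if (mm->num_exe_file_vmas == 0 && mm->exe_file) {
                        /* last executable mapping is gone: drop the reference
                         * so the filesystem backing the binary can be unmounted
                         */
                        fput(mm->exe_file);
                        mm->exe_file = NULL;
                }
        }

With this in place, readlink of /proc/pid/exe can report the stored file's
path directly, with no VMA walk and no nommu-specific variant.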
[akpm@linux-foundation.org: improve comments]
[yamamoto@valinux.co.jp: fix dup_mmap]
Signed-off-by: Matt Helsley <matthltc@us.ibm.com>
Cc: Oleg Nesterov <oleg@tv-sign.ru>
Cc: David Howells <dhowells@redhat.com>
Cc:"Eric W. Biederman" <ebiederm@xmission.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/mmap.c   24
-rw-r--r--  mm/nommu.c  23
2 files changed, 37 insertions(+), 10 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 677d184b0d42..fac66337da2a 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -230,8 +230,11 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
         might_sleep();
         if (vma->vm_ops && vma->vm_ops->close)
                 vma->vm_ops->close(vma);
-        if (vma->vm_file)
+        if (vma->vm_file) {
                 fput(vma->vm_file);
+                if (vma->vm_flags & VM_EXECUTABLE)
+                        removed_exe_file_vma(vma->vm_mm);
+        }
         mpol_put(vma_policy(vma));
         kmem_cache_free(vm_area_cachep, vma);
         return next;
@@ -623,8 +626,11 @@ again:          remove_next = 1 + (end > next->vm_end);
                 spin_unlock(&mapping->i_mmap_lock);

         if (remove_next) {
-                if (file)
+                if (file) {
                         fput(file);
+                        if (next->vm_flags & VM_EXECUTABLE)
+                                removed_exe_file_vma(mm);
+                }
                 mm->map_count--;
                 mpol_put(vma_policy(next));
                 kmem_cache_free(vm_area_cachep, next);
@@ -1154,6 +1160,8 @@ munmap_back:
                 error = file->f_op->mmap(file, vma);
                 if (error)
                         goto unmap_and_free_vma;
+                if (vm_flags & VM_EXECUTABLE)
+                        added_exe_file_vma(mm);
         } else if (vm_flags & VM_SHARED) {
                 error = shmem_zero_setup(vma);
                 if (error)
@@ -1185,6 +1193,8 @@ munmap_back:
                 mpol_put(vma_policy(vma));
                 kmem_cache_free(vm_area_cachep, vma);
                 fput(file);
+                if (vm_flags & VM_EXECUTABLE)
+                        removed_exe_file_vma(mm);
         } else {
                 vma_link(mm, vma, prev, rb_link, rb_parent);
                 file = vma->vm_file;
@@ -1817,8 +1827,11 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
         }
         vma_set_policy(new, pol);

-        if (new->vm_file)
+        if (new->vm_file) {
                 get_file(new->vm_file);
+                if (vma->vm_flags & VM_EXECUTABLE)
+                        added_exe_file_vma(mm);
+        }

         if (new->vm_ops && new->vm_ops->open)
                 new->vm_ops->open(new);
@@ -2135,8 +2148,11 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                         new_vma->vm_start = addr;
                         new_vma->vm_end = addr + len;
                         new_vma->vm_pgoff = pgoff;
-                        if (new_vma->vm_file)
+                        if (new_vma->vm_file) {
                                 get_file(new_vma->vm_file);
+                                if (vma->vm_flags & VM_EXECUTABLE)
+                                        added_exe_file_vma(mm);
+                        }
                         if (new_vma->vm_ops && new_vma->vm_ops->open)
                                 new_vma->vm_ops->open(new_vma);
                         vma_link(mm, new_vma, prev, rb_link, rb_parent);
diff --git a/mm/nommu.c b/mm/nommu.c
index 1d32fe89d57b..ef8c62cec697 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -966,8 +966,13 @@ unsigned long do_mmap_pgoff(struct file *file,

         INIT_LIST_HEAD(&vma->anon_vma_node);
         atomic_set(&vma->vm_usage, 1);
-        if (file)
+        if (file) {
                 get_file(file);
+                if (vm_flags & VM_EXECUTABLE) {
+                        added_exe_file_vma(current->mm);
+                        vma->vm_mm = current->mm;
+                }
+        }
         vma->vm_file    = file;
         vma->vm_flags   = vm_flags;
         vma->vm_start   = addr;
@@ -1022,8 +1027,11 @@ unsigned long do_mmap_pgoff(struct file *file,
         up_write(&nommu_vma_sem);
         kfree(vml);
         if (vma) {
-                if (vma->vm_file)
+                if (vma->vm_file) {
                         fput(vma->vm_file);
+                        if (vma->vm_flags & VM_EXECUTABLE)
+                                removed_exe_file_vma(vma->vm_mm);
+                }
                 kfree(vma);
         }
         return ret;
@@ -1053,7 +1061,7 @@ EXPORT_SYMBOL(do_mmap_pgoff);
 /*
  * handle mapping disposal for uClinux
  */
-static void put_vma(struct vm_area_struct *vma)
+static void put_vma(struct mm_struct *mm, struct vm_area_struct *vma)
 {
         if (vma) {
                 down_write(&nommu_vma_sem);
@@ -1075,8 +1083,11 @@ static void put_vma(struct vm_area_struct *vma)
                         realalloc -= kobjsize(vma);
                         askedalloc -= sizeof(*vma);

-                        if (vma->vm_file)
+                        if (vma->vm_file) {
                                 fput(vma->vm_file);
+                                if (vma->vm_flags & VM_EXECUTABLE)
+                                        removed_exe_file_vma(mm);
+                        }
                         kfree(vma);
                 }

@@ -1113,7 +1124,7 @@ int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
 found:
         vml = *parent;

-        put_vma(vml->vma);
+        put_vma(mm, vml->vma);

         *parent = vml->next;
         realalloc -= kobjsize(vml);
@@ -1158,7 +1169,7 @@ void exit_mmap(struct mm_struct * mm)

                 while ((tmp = mm->context.vmlist)) {
                         mm->context.vmlist = tmp->next;
-                        put_vma(tmp->vma);
+                        put_vma(mm, tmp->vma);

                         realalloc -= kobjsize(tmp);
                         askedalloc -= sizeof(*tmp);