author | Michael S. Tsirkin <mst@redhat.com> | 2009-09-21 17:03:51 -0700 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-22 07:17:42 -0700 |
commit | 3d2d827f5ca5e32816194119d5c980c7e04474a6 (patch) | |
tree | fe0e84669f5f20e1dff8e3dc6b191b4d5dfc0145 /mm/mmu_context.c | |
parent | 425fbf047cc70bb30dff368a6da02c8c2d229318 (diff) | |
download | linux-stable-3d2d827f5ca5e32816194119d5c980c7e04474a6.tar.gz linux-stable-3d2d827f5ca5e32816194119d5c980c7e04474a6.tar.bz2 linux-stable-3d2d827f5ca5e32816194119d5c980c7e04474a6.zip |
mm: move use_mm/unuse_mm from aio.c to mm/
Anyone who wants to do copy to/from user memory from a kernel thread needs
use_mm (like what fs/aio has). Move that into mm/, to make reusing and
exporting easier down the line, and make aio use it. The next intended user,
besides aio, will be vhost-net.
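For illustration, a minimal sketch of the calling pattern this enables, assuming the caller runs in a kernel thread (current->mm == NULL) and already holds a reference that keeps the target mm's address space alive; the helper name and its signature are hypothetical and not part of this patch:

```c
#include <linux/mm.h>
#include <linux/mmu_context.h>	/* use_mm(), unuse_mm() */
#include <linux/uaccess.h>

/*
 * Hypothetical helper: perform copy_from_user() on behalf of the task
 * that owns 'mm'.  Must be called from a kernel thread, and the caller
 * must keep 'mm' alive (e.g. via get_task_mm()) for the duration.
 */
static int borrow_mm_copy_from_user(struct mm_struct *mm, void *dst,
                                    const void __user *src, size_t len)
{
        int ret = 0;

        use_mm(mm);                     /* adopt the issuer's address space */
        if (copy_from_user(dst, src, len))
                ret = -EFAULT;
        unuse_mm(mm);                   /* detach; stay lazy on 'mm' */

        return ret;
}
```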
Acked-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/mmu_context.c')
-rw-r--r-- | mm/mmu_context.c | 55 |
1 file changed, 55 insertions, 0 deletions
diff --git a/mm/mmu_context.c b/mm/mmu_context.c
new file mode 100644
index 000000000000..fd473b51c903
--- /dev/null
+++ b/mm/mmu_context.c
@@ -0,0 +1,55 @@
+/* Copyright (C) 2009 Red Hat, Inc.
+ *
+ * See ../COPYING for licensing terms.
+ */
+
+#include <linux/mm.h>
+#include <linux/mmu_context.h>
+#include <linux/sched.h>
+
+#include <asm/mmu_context.h>
+
+/*
+ * use_mm
+ *	Makes the calling kernel thread take on the specified
+ *	mm context.
+ *	Called by the retry thread execute retries within the
+ *	iocb issuer's mm context, so that copy_from/to_user
+ *	operations work seamlessly for aio.
+ *	(Note: this routine is intended to be called only
+ *	from a kernel thread context)
+ */
+void use_mm(struct mm_struct *mm)
+{
+	struct mm_struct *active_mm;
+	struct task_struct *tsk = current;
+
+	task_lock(tsk);
+	active_mm = tsk->active_mm;
+	atomic_inc(&mm->mm_count);
+	tsk->mm = mm;
+	tsk->active_mm = mm;
+	switch_mm(active_mm, mm, tsk);
+	task_unlock(tsk);
+
+	mmdrop(active_mm);
+}
+
+/*
+ * unuse_mm
+ *	Reverses the effect of use_mm, i.e. releases the
+ *	specified mm context which was earlier taken on
+ *	by the calling kernel thread
+ *	(Note: this routine is intended to be called only
+ *	from a kernel thread context)
+ */
+void unuse_mm(struct mm_struct *mm)
+{
+	struct task_struct *tsk = current;
+
+	task_lock(tsk);
+	tsk->mm = NULL;
+	/* active_mm is still 'mm' */
+	enter_lazy_tlb(mm, tsk);
+	task_unlock(tsk);
+}
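One detail worth noting above: use_mm() only takes an mm_count reference (and mmdrop()s the kernel thread's previous active_mm); it does not bump mm_users, so it does not by itself keep the address space from being torn down. A caller is therefore assumed to hold its own mm_users reference around the borrow, for example along these lines (the context structure and function names are hypothetical, not taken from this patch):

```c
#include <linux/mm.h>
#include <linux/sched.h>

/* Hypothetical per-instance context pinning the issuer's mm. */
struct issuer_ctx {
        struct mm_struct *mm;
};

static int issuer_ctx_init(struct issuer_ctx *ctx)
{
        /* get_task_mm() bumps mm_users, keeping the page tables alive */
        ctx->mm = get_task_mm(current);
        return ctx->mm ? 0 : -EINVAL;
}

static void issuer_ctx_release(struct issuer_ctx *ctx)
{
        /* drop mm_users once no kernel thread will use_mm() this mm */
        mmput(ctx->mm);
        ctx->mm = NULL;
}
```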