author		Oleg Nesterov <oleg@tv-sign.ru>	2007-10-16 23:27:22 -0700
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-17 08:42:53 -0700
commit		b2c903b8790467ae400f6992ac01bc8913b49e19 (patch)
tree		656e6523b76856742cb38e36acf90ec5641bc7be /fs/exec.c
parent		0840a90d943bcde2fbfeabd3c256236eed2273cd (diff)
exec: simplify the new ->sighand allocation
de_thread() pre-allocates newsighand to make sure that exec() can't fail after killing all sub-threads.

Imho, this buys nothing, but complicates the code:

- this is (mostly) needed to handle CLONE_SIGHAND without CLONE_THREAD tasks, a very unlikely (if ever used) case

- unless we already have some serious problems, a GFP_KERNEL allocation should not fail

- ENOMEM can still happen after de_thread(); ->sighand is not the last object we have to allocate

Change the code to allocate the new ->sighand on demand.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
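As an aside for readers unfamiliar with the pattern, the userspace sketch below illustrates the shape of the change: rather than pre-allocating a replacement structure up front and freeing it on the common path, a private copy is made only when the old one turns out to still be shared. The names handler_table and unshare_handlers are hypothetical stand-ins for sighand_struct and de_thread(), and malloc() plus a plain integer count stand in for kmem_cache_alloc() and the kernel's atomic_t; all of the kernel's locking is elided.

#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for sighand_struct: a refcounted handler table. */
struct handler_table {
	int count;                 /* share count (atomic_t in the kernel) */
	void (*action[64])(int);   /* handler slots */
};

/*
 * Make *tablep private, copying it only if it is still shared.
 * This mirrors the on-demand allocation the patch introduces.
 * Returns 0 on success, -1 on allocation failure (the -ENOMEM case).
 */
static int unshare_handlers(struct handler_table **tablep)
{
	struct handler_table *old = *tablep, *copy;

	if (old->count == 1)
		return 0;               /* not shared any more, keep it */

	copy = malloc(sizeof(*copy));
	if (!copy)
		return -1;              /* analogue of returning -ENOMEM */

	copy->count = 1;
	memcpy(copy->action, old->action, sizeof(copy->action));
	old->count--;                   /* drop our reference to the shared table */
	*tablep = copy;
	return 0;
}

int main(void)
{
	static struct handler_table shared = { .count = 2 };
	struct handler_table *mine = &shared;

	if (unshare_handlers(&mine))
		return 1;

	/* mine now points at a private copy; the shared table keeps one user. */
	if (mine != &shared)
		free(mine);
	return 0;
}

The design point matches the commit message: the allocation can still fail after the other threads have been killed, but so can later allocations in exec(), so there is no benefit to reserving the memory early.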
Diffstat (limited to 'fs/exec.c')
-rw-r--r--	fs/exec.c	24
1 file changed, 9 insertions(+), 15 deletions(-)
diff --git a/fs/exec.c b/fs/exec.c
index 7f325df5e014..c6e91c3dc2a1 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -747,7 +747,7 @@ static int exec_mmap(struct mm_struct *mm)
 static int de_thread(struct task_struct *tsk)
 {
 	struct signal_struct *sig = tsk->signal;
-	struct sighand_struct *newsighand, *oldsighand = tsk->sighand;
+	struct sighand_struct *oldsighand = tsk->sighand;
 	spinlock_t *lock = &oldsighand->siglock;
 	struct task_struct *leader = NULL;
 	int count;
@@ -761,10 +761,6 @@ static int de_thread(struct task_struct *tsk)
 		return 0;
 	}
 
-	newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
-	if (!newsighand)
-		return -ENOMEM;
-
 	if (thread_group_empty(tsk))
 		goto no_thread_group;
 
@@ -781,7 +777,6 @@ static int de_thread(struct task_struct *tsk)
 		 */
 		spin_unlock_irq(lock);
 		read_unlock(&tasklist_lock);
-		kmem_cache_free(sighand_cachep, newsighand);
 		return -EAGAIN;
 	}
 
@@ -899,17 +894,16 @@ no_thread_group:
 	if (leader)
 		release_task(leader);
 
-	if (atomic_read(&oldsighand->count) == 1) {
-		/*
-		 * Now that we nuked the rest of the thread group,
-		 * it turns out we are not sharing sighand any more either.
-		 * So we can just keep it.
-		 */
-		kmem_cache_free(sighand_cachep, newsighand);
-	} else {
+	if (atomic_read(&oldsighand->count) != 1) {
+		struct sighand_struct *newsighand;
 		/*
-		 * Move our state over to newsighand and switch it in.
+		 * This ->sighand is shared with the CLONE_SIGHAND
+		 * but not CLONE_THREAD task, switch to the new one.
 		 */
+		newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
+		if (!newsighand)
+			return -ENOMEM;
+
 		atomic_set(&newsighand->count, 1);
 		memcpy(newsighand->action, oldsighand->action,
 		       sizeof(newsighand->action));