From 54d8d3b5a0ce1cdbad1d3154c9ea9732d394e9c7 Mon Sep 17 00:00:00 2001
From: Paolo 'Blaisorblade' Giarrusso
Date: Fri, 31 Mar 2006 02:30:24 -0800
Subject: [PATCH] uml: add arch_switch_to for newly forked thread

Newly forked threads have no arch_switch_to_skas() called before their first
run, because when schedule() switches to them they're resumed in the body of
thread_wait() inside fork_handler() rather than in switch_threads() in
switch_to_skas().  Compensate for this missing call.

Signed-off-by: Paolo 'Blaisorblade' Giarrusso
Acked-by: Jeff Dike
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/um/kernel/skas/process_kern.c |  7 +++++++
 arch/um/sys-i386/ptrace.c          |  9 ++++++++-
 arch/um/sys-i386/tls.c             | 13 ++++++++++---
 3 files changed, 25 insertions(+), 4 deletions(-)

diff --git a/arch/um/kernel/skas/process_kern.c b/arch/um/kernel/skas/process_kern.c
index 38b185370c42..2135eaf98a93 100644
--- a/arch/um/kernel/skas/process_kern.c
+++ b/arch/um/kernel/skas/process_kern.c
@@ -91,10 +91,17 @@ void fork_handler(int sig)
 		panic("blech");
 
 	schedule_tail(current->thread.prev_sched);
+
+	/* XXX: if interrupt_end() calls schedule, this call to
+	 * arch_switch_to_skas isn't needed. We could want to apply this to
+	 * improve performance. -bb */
+	arch_switch_to_skas(current->thread.prev_sched, current);
+
 	current->thread.prev_sched = NULL;
 
 	/* Handle any immediate reschedules or signals */
 	interrupt_end();
+
 	userspace(&current->thread.regs.regs);
 }
 
diff --git a/arch/um/sys-i386/ptrace.c b/arch/um/sys-i386/ptrace.c
index 6a23cc6947c3..6028bc7cc01b 100644
--- a/arch/um/sys-i386/ptrace.c
+++ b/arch/um/sys-i386/ptrace.c
@@ -23,7 +23,14 @@ void arch_switch_to_tt(struct task_struct *from, struct task_struct *to)
 
 void arch_switch_to_skas(struct task_struct *from, struct task_struct *to)
 {
-	arch_switch_tls_skas(from, to);
+	int err = arch_switch_tls_skas(from, to);
+	if (!err)
+		return;
+
+	if (err != -EINVAL)
+		printk(KERN_WARNING "arch_switch_tls_skas failed, errno %d, not EINVAL\n", -err);
+	else
+		printk(KERN_WARNING "arch_switch_tls_skas failed, errno = EINVAL\n");
 }
 
 int is_syscall(unsigned long addr)
diff --git a/arch/um/sys-i386/tls.c b/arch/um/sys-i386/tls.c
index e3c5bc593fae..2251654c6b45 100644
--- a/arch/um/sys-i386/tls.c
+++ b/arch/um/sys-i386/tls.c
@@ -70,8 +70,6 @@ static int get_free_idx(struct task_struct* task)
 	return -ESRCH;
 }
 
-#define O_FORCE 1
-
 static inline void clear_user_desc(struct user_desc* info)
 {
 	/* Postcondition: LDT_empty(info) returns true. */
@@ -84,6 +82,8 @@ static inline void clear_user_desc(struct user_desc* info)
 	info->seg_not_present = 1;
 }
 
+#define O_FORCE 1
+
 static int load_TLS(int flags, struct task_struct *to)
 {
 	int ret = 0;
@@ -162,7 +162,13 @@ void clear_flushed_tls(struct task_struct *task)
  * SKAS patch. */
 int arch_switch_tls_skas(struct task_struct *from, struct task_struct *to)
 {
-	return load_TLS(O_FORCE, to);
+	/* We have no need whatsoever to switch TLS for kernel threads; beyond
+	 * that, that would also result in us calling os_set_thread_area with
+	 * userspace_pid[cpu] == 0, which gives an error. */
+	if (likely(to->mm))
+		return load_TLS(O_FORCE, to);
+
+	return 0;
 }
 
 int arch_switch_tls_tt(struct task_struct *from, struct task_struct *to)
@@ -324,3 +330,4 @@ int ptrace_get_thread_area(struct task_struct *child, int idx,
 out:
 	return ret;
 }
+