author	Linus Torvalds <torvalds@linux-foundation.org>	2012-02-20 10:24:09 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-02-20 10:24:09 -0800
commit	cea20ca3f3181fc36788a15bc65d1062b96a0a6c (patch)
tree	86c646c0d228d1fe265536b81d86af406ff92c8e /arch/x86
parent	b01543dfe67bb1d191998e90d20534dc354de059 (diff)
i387: fix up some fpu_counter confusion
This makes sure we clear the FPU usage counter for newly created tasks, just so that we start off in a known state (for example, don't try to preload the FPU state on the first task switch etc).

It also fixes a thinko in when we increment the fpu_counter at task switch time, introduced by commit 34ddc81a230b ("i387: re-introduce FPU state preloading at context switch time"). We should increment the *new* task fpu_counter, not the old task, and only if we decide to use that state (whether lazily or preloaded).

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
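The counter bookkeeping is small but easy to misread, so the sketch below is a minimal, self-contained user-space model of the corrected behaviour. It is an illustration only, not the kernel code: the names model_task, model_copy_thread and model_switch_fpu are invented for this example, and the real logic lives in switch_fpu_prepare() and copy_thread(), shown in the diff that follows.

/*
 * Simplified user-space model of the fpu_counter handling fixed by this
 * commit.  Names and types here are invented for illustration; they are
 * not part of the kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

struct model_task {
	unsigned int fpu_counter;	/* how often this task recently used the FPU */
	bool has_fpu_state;		/* does it own live FPU state right now? */
};

/* A freshly created task starts with a zeroed counter, as copy_thread() now guarantees. */
static void model_copy_thread(struct model_task *p)
{
	p->fpu_counter = 0;
	p->has_fpu_state = false;
}

/*
 * Context switch: bump the counter of the *incoming* task, and only when we
 * actually decide to use (preload) its FPU state -- mirroring the two hunks
 * in switch_fpu_prepare() below.
 */
static void model_switch_fpu(struct model_task *old, struct model_task *new, bool preload)
{
	if (!old->has_fpu_state)
		old->fpu_counter = 0;	/* the old task did not touch the FPU */
	old->has_fpu_state = false;

	if (preload) {
		new->fpu_counter++;	/* the fix: credit the new task, not the old one */
		new->has_fpu_state = true;
	}
}

int main(void)
{
	struct model_task a = { .fpu_counter = 3, .has_fpu_state = true };
	struct model_task b;

	model_copy_thread(&b);			/* freshly forked task: counter known to be 0 */
	model_switch_fpu(&a, &b, true);		/* switch from a to b, preloading b's state */
	printf("a=%u b=%u\n", a.fpu_counter, b.fpu_counter);	/* prints a=3 b=1 */
	return 0;
}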
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/include/asm/i387.h	3
-rw-r--r--	arch/x86/kernel/process_32.c	1
-rw-r--r--	arch/x86/kernel/process_64.c	1
3 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index a850b4d8d14d..8df95849721d 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -348,10 +348,10 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 		if (__save_init_fpu(old))
 			fpu_lazy_state_intact(old);
 		__thread_clear_has_fpu(old);
-		old->fpu_counter++;
 
 		/* Don't change CR0.TS if we just switch! */
 		if (fpu.preload) {
+			new->fpu_counter++;
 			__thread_set_has_fpu(new);
 			prefetch(new->thread.fpu.state);
 		} else
@@ -359,6 +359,7 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 	} else {
 		old->fpu_counter = 0;
 		if (fpu.preload) {
+			new->fpu_counter++;
 			if (fpu_lazy_restore(new))
 				fpu.preload = 0;
 			else
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 80bfe1ab0031..bc32761bc27a 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -214,6 +214,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 
 	task_user_gs(p) = get_user_gs(regs);
 
+	p->fpu_counter = 0;
 	p->thread.io_bitmap_ptr = NULL;
 	tsk = current;
 	err = -ENOMEM;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 1fd94bc4279d..8ad880b3bc1c 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -286,6 +286,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 
 	set_tsk_thread_flag(p, TIF_FORK);
 
+	p->fpu_counter = 0;
 	p->thread.io_bitmap_ptr = NULL;
 
 	savesegment(gs, p->thread.gsindex);