author    Ingo Molnar <mingo@kernel.org>  2015-04-22 09:52:56 +0200
committer Ingo Molnar <mingo@kernel.org>  2015-05-19 15:47:14 +0200
commit  c0c2803dee21bef08ef5aacdf96fe2f1759ccc62 (patch)
tree    d3f75137f0589eb3214e23bcaab9d2919b31465b /arch/x86/include/asm/fpu-internal.h
parent  3f6a0bce90289e0980b4250ccb03b765860247ee (diff)
x86/fpu: Move thread_info::fpu_counter into thread_info::fpu.counter
This field is kept separate from the main FPU state structure for no good reason.

Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
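Conceptually the move is small: the per-task switch counter is relocated from thread_struct into struct fpu, so the lazy-restore bookkeeping sits next to the state it tracks. A minimal sketch of the resulting layout (field lists heavily trimmed and the save-area pointer simplified to void *; see the kernel headers for the real definitions):

	struct fpu {
		unsigned int	last_cpu;
		void		*state;		/* save area, simplified for this sketch */
		unsigned char	counter;	/* was thread_struct::fpu_counter */
	};

	struct thread_struct {
		struct fpu	fpu;		/* counter now travels with the FPU state */
		/* ... before this patch, unsigned char fpu_counter sat here */
	};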
Diffstat (limited to 'arch/x86/include/asm/fpu-internal.h')
-rw-r--r--  arch/x86/include/asm/fpu-internal.h | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 02e0e97d8be7..f85d21b68901 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -384,7 +384,7 @@ static inline void drop_fpu(struct task_struct *tsk)
 	 * Forget coprocessor state..
 	 */
 	preempt_disable();
-	tsk->thread.fpu_counter = 0;
+	tsk->thread.fpu.counter = 0;
 
 	if (__thread_has_fpu(tsk)) {
 		/* Ignore delayed exceptions from user space */
@@ -441,7 +441,7 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 	 * or if the past 5 consecutive context-switches used math.
 	 */
 	fpu.preload = tsk_used_math(new) &&
-		      (use_eager_fpu() || new->thread.fpu_counter > 5);
+		      (use_eager_fpu() || new->thread.fpu.counter > 5);
 
 	if (__thread_has_fpu(old)) {
 		if (!__save_init_fpu(old))
@@ -454,16 +454,16 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 		/* Don't change CR0.TS if we just switch! */
 		if (fpu.preload) {
-			new->thread.fpu_counter++;
+			new->thread.fpu.counter++;
 			__thread_set_has_fpu(new);
 			prefetch(new->thread.fpu.state);
 		} else if (!use_eager_fpu())
 			stts();
 	} else {
-		old->thread.fpu_counter = 0;
+		old->thread.fpu.counter = 0;
 		task_disable_lazy_fpu_restore(old);
 		if (fpu.preload) {
-			new->thread.fpu_counter++;
+			new->thread.fpu.counter++;
 			if (fpu_lazy_restore(new, cpu))
 				fpu.preload = 0;
 			else
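The renamed counter is what drives the preload decision visible in switch_fpu_prepare() above: a task's FPU state is preloaded either unconditionally in eager mode, or once the task has used the FPU on more than 5 consecutive context switches. A standalone sketch of that policy (plain C; the struct and names here are hypothetical stand-ins for the kernel's task state and helpers, not the kernel's own API):

	#include <stdbool.h>

	/* Hypothetical stand-ins for the kernel's task state and helpers. */
	struct task {
		bool		used_math;	/* mimics tsk_used_math() */
		unsigned char	fpu_counter;	/* mimics thread.fpu.counter */
	};

	static bool eager_fpu;			/* mimics use_eager_fpu() */

	/*
	 * Decide whether to preload the incoming task's FPU state during a
	 * context switch: always in eager mode, otherwise only once the
	 * task has used the FPU on more than 5 consecutive switches.
	 */
	static bool should_preload(const struct task *next)
	{
		return next->used_math &&
		       (eager_fpu || next->fpu_counter > 5);
	}

The counter is reset to 0 when a task stops using the FPU (drop_fpu(), or an old task that no longer owns the FPU), so only sustained FPU use crosses the threshold.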