-rw-r--r--  arch/x86/include/asm/fpu-internal.h  16
-rw-r--r--  arch/x86/kernel/fpu/core.c           17
2 files changed, 14 insertions(+), 19 deletions(-)
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index e180fb96dd0d..c005d1fc1247 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -323,16 +323,6 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
return fpu_restore_checking(&tsk->thread.fpu);
}

-/*
- * Software FPU state helpers. Careful: these need to
- * be preemption protected *and* they need to be
- * properly paired with the CR0.TS changes!
- */
-static inline int __thread_has_fpu(struct task_struct *tsk)
-{
- return tsk->thread.fpu.has_fpu;
-}
-
/* Must be paired with an 'stts' after! */
static inline void __thread_clear_has_fpu(struct task_struct *tsk)
{
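
The hunk above drops the __thread_has_fpu() wrapper in favor of reading fpu->has_fpu through a 'struct fpu *' pointer taken once per function. A minimal stand-alone C sketch of the before/after access pattern; the struct definitions below are simplified stand-ins, not the real kernel types:

#include <stdio.h>

/* Simplified stand-ins for the kernel types involved. */
struct fpu { int has_fpu; };
struct thread_struct { struct fpu fpu; };
struct task_struct { struct thread_struct thread; };

/* Before: a helper reaches through the task pointer on every call. */
static inline int __thread_has_fpu(struct task_struct *tsk)
{
	return tsk->thread.fpu.has_fpu;
}

int main(void)
{
	struct task_struct task = { .thread = { .fpu = { .has_fpu = 1 } } };

	/* After: take a 'struct fpu *' once, then test the field directly. */
	struct fpu *fpu = &task.thread.fpu;

	printf("old: %d, new: %d\n", __thread_has_fpu(&task), fpu->has_fpu);
	return 0;
}
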
@@ -370,13 +360,14 @@ static inline void __thread_fpu_begin(struct task_struct *tsk)

static inline void drop_fpu(struct task_struct *tsk)
{
+ struct fpu *fpu = &tsk->thread.fpu;
/*
* Forget coprocessor state..
*/
preempt_disable();
tsk->thread.fpu.counter = 0;

- if (__thread_has_fpu(tsk)) {
+ if (fpu->has_fpu) {
/* Ignore delayed exceptions from user space */
asm volatile("1: fwait\n"
"2:\n"
@@ -424,6 +415,7 @@ typedef struct { int preload; } fpu_switch_t;

static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
{
+ struct fpu *old_fpu = &old->thread.fpu;
fpu_switch_t fpu;

/*
@@ -433,7 +425,7 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
fpu.preload = tsk_used_math(new) &&
(use_eager_fpu() || new->thread.fpu.counter > 5);

- if (__thread_has_fpu(old)) {
+ if (old_fpu->has_fpu) {
if (!fpu_save_init(&old->thread.fpu))
task_disable_lazy_fpu_restore(old);
else
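
The context lines above also show the preload heuristic that decides whether the next task's FPU state is restored eagerly at context switch. A hedged stand-alone model of just that decision; the function and parameter names are illustrative, not the kernel's API:

#include <stdbool.h>
#include <stdio.h>

/* Models: tsk_used_math(new) && (use_eager_fpu() || new->thread.fpu.counter > 5).
 * 'counter' tracks how many recent context switches used the FPU. */
static bool should_preload(bool used_math, bool eager_fpu, int counter)
{
	return used_math && (eager_fpu || counter > 5);
}

int main(void)
{
	printf("%d\n", should_preload(true, false, 6));	/* 1: frequent FPU user */
	printf("%d\n", should_preload(true, false, 2));	/* 0: defer via lazy restore */
	printf("%d\n", should_preload(false, true, 9));	/* 0: task never used the FPU */
	return 0;
}
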
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 3aeab3f12835..29b837730a07 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -57,8 +57,7 @@ static bool interrupted_kernel_fpu_idle(void)
if (use_eager_fpu())
return true;

- return !__thread_has_fpu(current) &&
- (read_cr0() & X86_CR0_TS);
+ return !current->thread.fpu.has_fpu && (read_cr0() & X86_CR0_TS);
}

/*
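
Rewritten, the predicate reads: with eager FPU switching the register state is always valid, so kernel-mode FPU use from this context is always safe; in lazy mode it is safe only if the interrupted task does not own the FPU and CR0.TS is set. A stand-alone model with illustrative inputs in place of the real current-task and CR0 accessors:

#include <stdbool.h>
#include <stdio.h>

#define X86_CR0_TS 0x00000008UL	/* task-switched bit in CR0 */

/* Stand-in model of interrupted_kernel_fpu_idle(). */
static bool kernel_fpu_idle(bool eager_fpu, bool cur_has_fpu, unsigned long cr0)
{
	if (eager_fpu)
		return true;
	return !cur_has_fpu && (cr0 & X86_CR0_TS);
}

int main(void)
{
	printf("%d\n", kernel_fpu_idle(false, false, X86_CR0_TS));	/* 1: FPU is free */
	printf("%d\n", kernel_fpu_idle(false, true, 0));		/* 0: owner's state is live */
	return 0;
}
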
@@ -93,11 +92,12 @@ EXPORT_SYMBOL(irq_fpu_usable);
void __kernel_fpu_begin(void)
{
struct task_struct *me = current;
+ struct fpu *fpu = &me->thread.fpu;

kernel_fpu_disable();

- if (__thread_has_fpu(me)) {
- fpu_save_init(&me->thread.fpu);
+ if (fpu->has_fpu) {
+ fpu_save_init(fpu);
} else {
this_cpu_write(fpu_owner_task, NULL);
if (!use_eager_fpu())
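
The branch above either saves the owner's registers or forgets the lazy owner before the kernel touches the FPU. A sketch of that control flow with no-op stand-ins for the per-CPU owner write and clts(); all helper names here are illustrative:

#include <stdbool.h>
#include <stdio.h>

struct fpu { bool has_fpu; };

/* Illustrative no-op stand-ins for kernel primitives. */
static struct fpu *fpu_owner;	/* models this_cpu(fpu_owner_task) */
static void save_fpu_regs(struct fpu *fpu) { (void)fpu; puts("saved owner state"); }
static void clear_cr0_ts(void) { puts("CR0.TS cleared"); }

static void kernel_fpu_begin_sketch(struct fpu *fpu, bool eager_fpu)
{
	if (fpu->has_fpu) {
		/* Current task owns the FPU: preserve its registers first. */
		save_fpu_regs(fpu);
	} else {
		/* No live state here: drop the lazy owner and, in lazy
		 * mode, make the FPU usable without faulting. */
		fpu_owner = NULL;
		if (!eager_fpu)
			clear_cr0_ts();
	}
}

int main(void)
{
	struct fpu fpu = { .has_fpu = false };
	kernel_fpu_begin_sketch(&fpu, false);	/* prints "CR0.TS cleared" */
	return 0;
}
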
@@ -109,8 +109,9 @@ EXPORT_SYMBOL(__kernel_fpu_begin);
void __kernel_fpu_end(void)
{
struct task_struct *me = current;
+ struct fpu *fpu = &me->thread.fpu;

- if (__thread_has_fpu(me)) {
+ if (fpu->has_fpu) {
if (WARN_ON(restore_fpu_checking(me)))
fpu_reset_state(me);
} else if (!use_eager_fpu()) {
@@ -128,14 +129,16 @@ EXPORT_SYMBOL(__kernel_fpu_end);
*/
void fpu__save(struct task_struct *tsk)
{
+ struct fpu *fpu = &tsk->thread.fpu;
+
WARN_ON(tsk != current);

preempt_disable();
- if (__thread_has_fpu(tsk)) {
+ if (fpu->has_fpu) {
if (use_eager_fpu()) {
__save_fpu(tsk);
} else {
- fpu_save_init(&tsk->thread.fpu);
+ fpu_save_init(fpu);
__thread_fpu_end(tsk);
}
}
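
Taken together, fpu__save() now runs under preemption protection and, if the task owns the FPU, saves eagerly (keeping ownership) or lazily (saving and then releasing via __thread_fpu_end()). A stand-alone control-flow sketch with stubbed primitives; the helper names below are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

struct fpu { bool has_fpu; };

/* Illustrative no-op stand-ins for the kernel primitives. */
static void preempt_disable(void) { }
static void preempt_enable(void) { }
static void save_keep_ownership(struct fpu *fpu) { (void)fpu; puts("eager save"); }
static void save_and_release(struct fpu *fpu)
{
	puts("lazy save");
	fpu->has_fpu = false;	/* mirrors __thread_fpu_end() */
}

static void fpu_save_sketch(struct fpu *fpu, bool eager_fpu)
{
	preempt_disable();
	if (fpu->has_fpu) {
		if (eager_fpu)
			save_keep_ownership(fpu);
		else
			save_and_release(fpu);
	}
	preempt_enable();
}

int main(void)
{
	struct fpu fpu = { .has_fpu = true };
	fpu_save_sketch(&fpu, false);	/* prints "lazy save"; fpu.has_fpu is now false */
	return 0;
}
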