[PATCH RFC] x86,fpu: merge save_init_fpu & unlazy_fpu

From: Rik van Riel
Date: Thu Jan 29 2015 - 16:00:38 EST


The functions save_init_fpu and unlazy_fpu do essentially the same
thing: they save the fpu context to memory and then call
__thread_fpu_end, which most of their callers neither need nor want.

Get rid of unlazy_fpu, and make save_init_fpu do what unlazy_fpu
does today, including preemption-safe state saving under potentially
unusual conditions, minus the call to __thread_fpu_end.

Callers of init_fpu do want __thread_fpu_end, so move that call into
init_fpu itself, right after the state has been saved.
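
That is, init_fpu now does the save and the fpu drop back to back
for the current task (taken from the i387.c hunk below):

	if (cpu_has_fpu && tsk == current) {
		save_init_fpu(tsk);
		__thread_fpu_end(tsk);
	}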

I am not sure whether math_error really requires __thread_fpu_end.
One would expect it to need that in order to sanitize the math state
before do_device_not_available and math_state_restore reload it, but
those do not currently appear to do any such sanitizing. I am not sure
how a task that catches SIGFPE is supposed to recover and continue...

Signed-off-by: Rik van Riel <riel@xxxxxxxxxx>
---
 arch/x86/include/asm/fpu-internal.h | 18 ++++++++----------
 arch/x86/include/asm/i387.h         |  2 --
 arch/x86/kernel/i387.c              | 18 ++++--------------
 arch/x86/kernel/traps.c             |  1 +
 4 files changed, 13 insertions(+), 26 deletions(-)

diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 0dbc08282291..c7e440ddd269 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -524,16 +524,14 @@ static inline void __save_fpu(struct task_struct *tsk)
  */
 static inline void save_init_fpu(struct task_struct *tsk)
 {
-	WARN_ON_ONCE(!__thread_has_fpu(tsk));
-
-	if (use_eager_fpu()) {
-		__save_fpu(tsk);
-		return;
-	}
-
 	preempt_disable();
-	__save_init_fpu(tsk);
-	__thread_fpu_end(tsk);
+	if (__thread_has_fpu(tsk)) {
+		if (use_eager_fpu())
+			__save_fpu(tsk);
+		else
+			__save_init_fpu(tsk);
+	} else if (!use_eager_fpu())
+		tsk->thread.fpu_counter = 0;
 	preempt_enable();
 }

@@ -600,7 +598,7 @@ static inline void fpu_copy(struct task_struct *dst, struct task_struct *src)
 		struct fpu *dfpu = &dst->thread.fpu;
 		struct fpu *sfpu = &src->thread.fpu;
 
-		unlazy_fpu(src);
+		save_init_fpu(src);
 		memcpy(dfpu->state, sfpu->state, xstate_size);
 	}
 }
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 6eb6fcb83f63..07836671acfb 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -101,8 +101,6 @@ static inline int user_has_fpu(void)
 	return current->thread.fpu.has_fpu;
 }
 
-extern void unlazy_fpu(struct task_struct *tsk);
-
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_X86_I387_H */
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 47348653503a..3afc4e73b07f 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -116,18 +116,6 @@ void __kernel_fpu_end(void)
 }
 EXPORT_SYMBOL(__kernel_fpu_end);
 
-void unlazy_fpu(struct task_struct *tsk)
-{
-	preempt_disable();
-	if (__thread_has_fpu(tsk)) {
-		__save_init_fpu(tsk);
-		__thread_fpu_end(tsk);
-	} else
-		tsk->thread.fpu_counter = 0;
-	preempt_enable();
-}
-EXPORT_SYMBOL(unlazy_fpu);
-
 unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
 unsigned int xstate_size;
 EXPORT_SYMBOL_GPL(xstate_size);
@@ -245,8 +233,10 @@ int init_fpu(struct task_struct *tsk)
 	int ret;
 
 	if (tsk_used_math(tsk)) {
-		if (cpu_has_fpu && tsk == current)
-			unlazy_fpu(tsk);
+		if (cpu_has_fpu && tsk == current) {
+			save_init_fpu(tsk);
+			__thread_fpu_end(tsk);
+		}
 		tsk->thread.fpu.last_cpu = ~0;
 		return 0;
 	}
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index fb4cb6adf225..201522fd2c96 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -664,6 +664,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
 	 * Save the info for the exception handler and clear the error.
 	 */
 	save_init_fpu(task);
+	__thread_fpu_end(task);
 	task->thread.trap_nr = trapnr;
 	task->thread.error_code = error_code;
 	info.si_signo = SIGFPE;
