[patch 18/41] x86/fpu: Rename copy_fpregs_to_fpstate() to save_fpregs_to_fpstate()

From: Thomas Gleixner
Date: Fri Jun 11 2021 - 12:44:59 EST


A copy is guaranteed to leave the source intact, which is not the case when
FNSAVE is used as that reinitializes the registers.

Rename it to save_fpregs_to_fpstate(), as "save" does not imply such a
guarantee and matches what this is about: saving the state for a later
restore.

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
---
 arch/x86/include/asm/fpu/internal.h |    4 ++--
 arch/x86/kernel/fpu/core.c          |   10 +++++-----
 arch/x86/kvm/x86.c                  |    2 +-
 3 files changed, 8 insertions(+), 8 deletions(-)
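
For reference, a minimal sketch of the caller contract the new name is
meant to convey, mirroring the return value handling visible in the
fpu__save() hunk below. The wrapper example_save_and_keep_fpregs() is
purely illustrative and not part of this patch:

/*
 * save_fpregs_to_fpstate() returns non-zero when the registers are still
 * valid after the save (XSAVE/FXSAVE paths) and 0 when legacy FNSAVE was
 * used, which reinitializes the registers as a side effect.
 */
static void example_save_and_keep_fpregs(struct fpu *fpu)
{
	fpregs_lock();
	if (!save_fpregs_to_fpstate(fpu)) {
		/* FNSAVE wiped the registers; reload them from the saved state. */
		copy_kernel_to_fpregs(&fpu->state);
	}
	fpregs_unlock();
}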

--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -395,7 +395,7 @@ static inline int xrstor_from_kernel_err
 	return err;
 }

-extern int copy_fpregs_to_fpstate(struct fpu *fpu);
+extern int save_fpregs_to_fpstate(struct fpu *fpu);

 static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate, u64 mask)
 {
@@ -527,7 +527,7 @@ static inline void __fpregs_load_activat
 static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
 {
 	if (static_cpu_has(X86_FEATURE_FPU) && !(current->flags & PF_KTHREAD)) {
-		if (!copy_fpregs_to_fpstate(old_fpu))
+		if (!save_fpregs_to_fpstate(old_fpu))
 			old_fpu->last_cpu = -1;
 		else
 			old_fpu->last_cpu = cpu;
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -92,7 +92,7 @@ EXPORT_SYMBOL(irq_fpu_usable);
  * Modern FPU state can be kept in registers, if there are
  * no pending FP exceptions.
  */
-int copy_fpregs_to_fpstate(struct fpu *fpu)
+int save_fpregs_to_fpstate(struct fpu *fpu)
 {
 	if (likely(use_xsave())) {
 		xsave_to_kernel(&fpu->state.xsave);
@@ -119,7 +119,7 @@ int copy_fpregs_to_fpstate(struct fpu *f

 	return 0;
 }
-EXPORT_SYMBOL(copy_fpregs_to_fpstate);
+EXPORT_SYMBOL(save_fpregs_to_fpstate);

 void kernel_fpu_begin_mask(unsigned int kfpu_mask)
 {
@@ -137,7 +137,7 @@ void kernel_fpu_begin_mask(unsigned int
 		 * Ignore return value -- we don't care if reg state
 		 * is clobbered.
 		 */
-		copy_fpregs_to_fpstate(&current->thread.fpu);
+		save_fpregs_to_fpstate(&current->thread.fpu);
 	}
 	__cpu_invalidate_fpregs_state();

@@ -172,7 +172,7 @@ void fpu__save(struct fpu *fpu)
 	trace_x86_fpu_before_save(fpu);

 	if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
-		if (!copy_fpregs_to_fpstate(fpu)) {
+		if (!save_fpregs_to_fpstate(fpu)) {
 			copy_kernel_to_fpregs(&fpu->state);
 		}
 	}
@@ -255,7 +255,7 @@ int fpu__copy(struct task_struct *dst, s
 	if (test_thread_flag(TIF_NEED_FPU_LOAD))
 		memcpy(&dst_fpu->state, &src_fpu->state, fpu_kernel_xstate_size);

-	else if (!copy_fpregs_to_fpstate(dst_fpu))
+	else if (!save_fpregs_to_fpstate(dst_fpu))
 		copy_kernel_to_fpregs(&dst_fpu->state);

 	fpregs_unlock();
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9618,7 +9618,7 @@ static void kvm_save_current_fpu(struct
 		memcpy(&fpu->state, &current->thread.fpu.state,
 		       fpu_kernel_xstate_size);
 	else
-		copy_fpregs_to_fpstate(fpu);
+		save_fpregs_to_fpstate(fpu);
 }

 /* Swap (qemu) user FPU context for the guest FPU context. */