diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 44c48e34d799..8529f7bf4233 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -619,6 +619,20 @@ static inline void switch_fpu_finish(struct fpu *new_fpu)
  * MXCSR and XCR definitions:
  */
 
+static inline u32 stmxcsr(void)
+{
+	u32 mxcsr;
+
+	asm volatile("stmxcsr %0" : "=m" (mxcsr));
+
+	return mxcsr;
+}
+
+static inline void ldmxcsr(u32 mxcsr)
+{
+	asm volatile("ldmxcsr %0" :: "m" (mxcsr));
+}
+
 extern unsigned int mxcsr_feature_mask;
 
 #define XCR_XFEATURE_ENABLED_MASK	0x00000000
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 12c70840980e..8bc275f55625 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -38,6 +38,8 @@ union fpregs_state init_fpstate __read_mostly;
  */
 static DEFINE_PER_CPU(bool, in_kernel_fpu);
 
+static DEFINE_PER_CPU(u32, usr_mxcsr);
+
 /*
  * Track which context is using the FPU on the CPU:
  */
@@ -84,6 +86,8 @@ EXPORT_SYMBOL(irq_fpu_usable);
 
 void kernel_fpu_begin(void)
 {
+	u32 mxcsr;
+
 	preempt_disable();
 
 	WARN_ON_FPU(!irq_fpu_usable());
@@ -100,15 +104,28 @@ void kernel_fpu_begin(void)
 		 */
 		copy_fpregs_to_fpstate(&current->thread.fpu);
 	}
+
+	mxcsr = stmxcsr();
+	if (mxcsr != MXCSR_DEFAULT) {
+		this_cpu_write(usr_mxcsr, mxcsr);
+		ldmxcsr(MXCSR_DEFAULT);
+	}
+
 	__cpu_invalidate_fpregs_state();
 }
 EXPORT_SYMBOL_GPL(kernel_fpu_begin);
 
 void kernel_fpu_end(void)
 {
+	u32 mxcsr = this_cpu_read(usr_mxcsr);
+
 	WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
 
 	this_cpu_write(in_kernel_fpu, false);
+
+	if (mxcsr != MXCSR_DEFAULT)
+		ldmxcsr(mxcsr);
+
 	preempt_enable();
 }
 EXPORT_SYMBOL_GPL(kernel_fpu_end);
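
For context, a minimal caller sketch of what this changes: kernel code that executes SSE/AVX instructions brackets them with kernel_fpu_begin()/kernel_fpu_end() (declared in <asm/fpu/api.h>). With this patch, such code starts from MXCSR_DEFAULT even if the interrupted user task had changed MXCSR (e.g. unmasked SIMD exceptions), and a non-default user value is loaded back in kernel_fpu_end(). The helper below is hypothetical and only illustrates the calling convention; it is not part of the patch.

	#include <asm/fpu/api.h>	/* kernel_fpu_begin()/kernel_fpu_end() */

	/* Hypothetical caller, not part of this patch. */
	static void example_simd_user(void)
	{
		kernel_fpu_begin();	/* FPU usable; with this patch MXCSR == MXCSR_DEFAULT here */
		/* ... SSE/AVX work here ... */
		kernel_fpu_end();	/* a non-default MXCSR saved at begin is loaded back */
	}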