// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu/internal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/types.h>
#include <asm/traps.h>
#include <asm/irq_regs.h>

#include <linux/hardirq.h>
#include <linux/pkeys.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/fpu.h>

/*
 * Represents the initial FPU state. It's mostly (but not completely) zeroes,
 * depending on the FPU hardware format:
 */
union fpregs_state init_fpstate __ro_after_init;

/* Track in-kernel FPU usage */
static DEFINE_PER_CPU(bool, in_kernel_fpu);

/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 */
bool irq_fpu_usable(void)
{
	if (WARN_ON_ONCE(in_nmi()))
		return false;

	/* In kernel FPU usage already active? */
	if (this_cpu_read(in_kernel_fpu))
		return false;

	/*
	 * When not in NMI or hard interrupt context, FPU can be used in:
	 *
	 * - Task context except from within fpregs_lock()'ed critical
	 *   regions.
	 *
	 * - Soft interrupt processing context which cannot happen
	 *   while in a fpregs_lock()'ed critical region.
	 */
	if (!in_hardirq())
		return true;

	/*
	 * In hard interrupt context it's safe when soft interrupts
	 * are enabled, which means the interrupt did not hit in
	 * a fpregs_lock()'ed critical region.
	 */
	return !softirq_count();
}
EXPORT_SYMBOL(irq_fpu_usable);

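/*
 * Illustrative caller pattern (not part of this file): code that wants
 * to use SIMD instructions in the kernel brackets the work with
 * kernel_fpu_begin()/kernel_fpu_end() and falls back to a scalar path
 * when the FPU is not usable in the current context:
 *
 *	if (irq_fpu_usable()) {
 *		kernel_fpu_begin();
 *		... SSE/AVX work on the FPU registers ...
 *		kernel_fpu_end();
 *	} else {
 *		... integer/scalar fallback ...
 *	}
 */
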
/*
 * Save the FPU register state in fpu->state. The register state is
 * preserved.
 *
 * Must be called with fpregs_lock() held.
 *
 * The legacy FNSAVE instruction clears all FPU state unconditionally, so
 * register state has to be reloaded. That might be a pointless exercise
 * when the FPU is going to be used by another task right after that. But
 * this only affects 20+ years old 32bit systems and avoids conditionals all
 * over the place.
 *
 * FXSAVE and all XSAVE variants preserve the FPU register state.
 */
void save_fpregs_to_fpstate(struct fpu *fpu)
{
	if (likely(use_xsave())) {
		os_xsave(&fpu->state.xsave);

		/*
		 * AVX512 state is tracked here because its use is
		 * known to slow the max clock speed of the core.
		 */
		if (fpu->state.xsave.header.xfeatures & XFEATURE_MASK_AVX512)
			fpu->avx512_timestamp = jiffies;
		return;
	}

	if (likely(use_fxsr())) {
		fxsave(&fpu->state.fxsave);
		return;
	}

	/*
	 * Legacy FPU register saving, FNSAVE always clears FPU registers,
	 * so we have to reload them from the memory state.
	 */
	asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state.fsave));
	frstor(&fpu->state.fsave);
}
EXPORT_SYMBOL(save_fpregs_to_fpstate);

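/*
 * Restore the FPU registers from @fpstate. @mask selects the xfeature
 * components that XRSTOR(S) should restore; the legacy FXRSTOR/FRSTOR
 * paths below ignore it and always restore everything they cover.
 */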
void __restore_fpregs_from_fpstate(union fpregs_state *fpstate, u64 mask)
{
	/*
	 * AMD K7/K8 and later CPUs up to Zen don't save/restore
	 * FDP/FIP/FOP unless an exception is pending. Clear the x87 state
	 * here by setting it to fixed values.  "m" is a random variable
	 * that should be in L1.
	 */
	if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
		asm volatile(
			"fnclex\n\t"
			"emms\n\t"
			"fildl %P[addr]"	/* set F?P to defined value */
			: : [addr] "m" (fpstate));
	}

	if (use_xsave()) {
		os_xrstor(&fpstate->xsave, mask);
	} else {
		if (use_fxsr())
			fxrstor(&fpstate->fxsave);
		else
			frstor(&fpstate->fsave);
	}
}
EXPORT_SYMBOL_GPL(__restore_fpregs_from_fpstate);

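/*
 * Start an in-kernel FPU section: disables preemption, saves current's
 * FPU register state if it is still live in the hardware, and puts
 * default values into the control registers selected by @kfpu_mask
 * (KFPU_MXCSR and/or KFPU_387, see <asm/fpu/api.h>). Most callers are
 * expected to use the kernel_fpu_begin() wrapper, which passes a
 * default mask, rather than calling this directly.
 */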
void kernel_fpu_begin_mask(unsigned int kfpu_mask)
{
	preempt_disable();

	WARN_ON_FPU(!irq_fpu_usable());
	WARN_ON_FPU(this_cpu_read(in_kernel_fpu));

	this_cpu_write(in_kernel_fpu, true);

	if (!(current->flags & PF_KTHREAD) &&
	    !test_thread_flag(TIF_NEED_FPU_LOAD)) {
		set_thread_flag(TIF_NEED_FPU_LOAD);
		save_fpregs_to_fpstate(&current->thread.fpu);
	}
	__cpu_invalidate_fpregs_state();

	/* Put sane initial values into the control registers. */
	if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM))
		ldmxcsr(MXCSR_DEFAULT);

	if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU))
		asm volatile ("fninit");
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask);

void kernel_fpu_end(void)
{
	WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));

	this_cpu_write(in_kernel_fpu, false);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);

/*
 * Sync the FPU register state to current's memory register state when the
 * current task owns the FPU. The hardware register state is preserved.
 */
void fpu_sync_fpstate(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	fpregs_lock();
	trace_x86_fpu_before_save(fpu);

	if (!test_thread_flag(TIF_NEED_FPU_LOAD))
		save_fpregs_to_fpstate(fpu);

	trace_x86_fpu_after_save(fpu);
	fpregs_unlock();
}

static inline void fpstate_init_xstate(struct xregs_state *xsave)
{
	/*
	 * XRSTORS requires these bits set in xcomp_bv, or it will
	 * trigger #GP: bit 63 (XCOMP_BV_COMPACTED_FORMAT) marks the
	 * buffer as compacted format, which XSAVES/XRSTORS operate on,
	 * plus the mask of all enabled xfeatures:
	 */
	xsave->header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | xfeatures_mask_all;
}

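/*
 * FXSAVE format init. 0x37f is the x87 power-on/FNINIT control word:
 * all exceptions masked, 64-bit precision, round to nearest.
 */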
static inline void fpstate_init_fxstate(struct fxregs_state *fx)
{
	fx->cwd = 0x37f;
	fx->mxcsr = MXCSR_DEFAULT;
}

/*
 * Legacy x87 fpstate init: the same reset values in FNSAVE layout, where
 * each 16-bit register sits in the low half of a 32-bit field and the
 * high halves are filled with ones (tag word 0xffff == all registers
 * empty):
 */
static inline void fpstate_init_fstate(struct fregs_state *fp)
{
	fp->cwd = 0xffff037fu;
	fp->swd = 0xffff0000u;
	fp->twd = 0xffffffffu;
	fp->fos = 0xffff0000u;
}

void fpstate_init(union fpregs_state *state)
{
	if (!static_cpu_has(X86_FEATURE_FPU)) {
		fpstate_init_soft(&state->soft);
		return;
	}

	memset(state, 0, fpu_kernel_xstate_size);

	if (static_cpu_has(X86_FEATURE_XSAVES))
		fpstate_init_xstate(&state->xsave);
	if (static_cpu_has(X86_FEATURE_FXSR))
		fpstate_init_fxstate(&state->fxsave);
	else
		fpstate_init_fstate(&state->fsave);
}
EXPORT_SYMBOL_GPL(fpstate_init);

/* Clone current's FPU state on fork */
int fpu_clone(struct task_struct *dst)
{
	struct fpu *src_fpu = &current->thread.fpu;
	struct fpu *dst_fpu = &dst->thread.fpu;

	/* The new task's FPU state cannot be valid in the hardware. */
	dst_fpu->last_cpu = -1;

	if (!cpu_feature_enabled(X86_FEATURE_FPU))
		return 0;

	/*
	 * Don't let 'init optimized' areas of the XSAVE area
	 * leak into the child task:
	 */
	memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);

	/*
	 * If the FPU registers are not owned by current just memcpy() the
	 * state.  Otherwise save the FPU registers directly into the
	 * child's FPU context, without any memory-to-memory copying.
	 */
	fpregs_lock();
	if (test_thread_flag(TIF_NEED_FPU_LOAD))
		memcpy(&dst_fpu->state, &src_fpu->state, fpu_kernel_xstate_size);
	else
		save_fpregs_to_fpstate(dst_fpu);
	fpregs_unlock();

	set_tsk_thread_flag(dst, TIF_NEED_FPU_LOAD);

	trace_x86_fpu_copy_src(src_fpu);
	trace_x86_fpu_copy_dst(dst_fpu);

	return 0;
}

/*
 * Drops current FPU state: deactivates the fpregs and
 * the fpstate. NOTE: it still leaves previous contents
 * in the fpregs in the eager-FPU case.
 *
 * This function can be used in cases where we know that
 * a state-restore is coming: either an explicit one,
 * or a reschedule.
 */
void fpu__drop(struct fpu *fpu)
{
	preempt_disable();

	if (fpu == &current->thread.fpu) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		fpregs_deactivate(fpu);
	}

	trace_x86_fpu_dropped(fpu);

	preempt_enable();
}

/*
 * Clear FPU registers by setting them up from the init fpstate.
 * Caller must do fpregs_[un]lock() around it.
 */
static inline void restore_fpregs_from_init_fpstate(u64 features_mask)
{
	if (use_xsave())
		os_xrstor(&init_fpstate.xsave, features_mask);
	else if (use_fxsr())
		fxrstor(&init_fpstate.fxsave);
	else
		frstor(&init_fpstate.fsave);

	pkru_write_default();
}

static inline unsigned int init_fpstate_copy_size(void)
{
	if (!use_xsave())
		return fpu_kernel_xstate_size;

	/* XSAVE(S) just needs the legacy and the xstate header part */
	return sizeof(init_fpstate.xsave);
}

/*
 * Reset current->fpu memory state to the init values.
 */
static void fpu_reset_fpstate(void)
{
	struct fpu *fpu = &current->thread.fpu;

	fpregs_lock();
	__fpu_invalidate_fpregs_state(fpu);
	/*
	 * This does not change the actual hardware registers. It just
	 * resets the memory image and sets TIF_NEED_FPU_LOAD so a
	 * subsequent return to usermode will reload the registers from the
	 * task's memory image.
	 *
	 * Do not use fpstate_init() here. Just copy init_fpstate which has
	 * the correct content already except for PKRU.
	 *
	 * PKRU handling does not rely on the xstate when restoring for
	 * user space as PKRU is eagerly written in switch_to() and
	 * flush_thread().
	 */
	memcpy(&fpu->state, &init_fpstate, init_fpstate_copy_size());
	set_thread_flag(TIF_NEED_FPU_LOAD);
	fpregs_unlock();
}

/*
 * Reset current's user FPU states to the init states.  current's
 * supervisor states, if any, are not modified by this function.  The
 * caller guarantees that the XSTATE header in memory is intact.
 */
void fpu__clear_user_states(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	fpregs_lock();
	if (!cpu_feature_enabled(X86_FEATURE_FPU)) {
		fpu_reset_fpstate();
		fpregs_unlock();
		return;
	}

	/*
	 * Ensure that current's supervisor states are loaded into their
	 * corresponding registers.
	 */
	if (xfeatures_mask_supervisor() &&
	    !fpregs_state_valid(fpu, smp_processor_id())) {
		os_xrstor(&fpu->state.xsave, xfeatures_mask_supervisor());
	}

	/* Reset user states in registers. */
	restore_fpregs_from_init_fpstate(xfeatures_mask_restore_user());

	/*
	 * Now all FPU registers have their desired values.  Inform the FPU
	 * state machine that current's FPU registers are in the hardware
	 * registers. The memory image does not need to be updated because
	 * any operation relying on it has to save the registers first when
	 * current's FPU is marked active.
	 */
	fpregs_mark_activate();
	fpregs_unlock();
}

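/*
 * Reset current's FPU state to the init state, e.g. when the task
 * image is replaced on exec() (see the flush_thread() reference in
 * fpu_reset_fpstate() above).
 */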
void fpu_flush_thread(void)
{
	fpu_reset_fpstate();
}

/*
 * Load FPU context before returning to userspace.
 */
void switch_fpu_return(void)
{
	if (!static_cpu_has(X86_FEATURE_FPU))
		return;

	fpregs_restore_userregs();
}
EXPORT_SYMBOL_GPL(switch_fpu_return);

#ifdef CONFIG_X86_DEBUG_FPU
/*
 * If current FPU state according to its tracking (loaded FPU context on this
 * CPU) is not valid then we must have TIF_NEED_FPU_LOAD set so the context is
 * loaded on return to userland.
 */
void fpregs_assert_state_consistent(void)
{
	struct fpu *fpu = &current->thread.fpu;

	if (test_thread_flag(TIF_NEED_FPU_LOAD))
		return;

	WARN_ON_FPU(!fpregs_state_valid(fpu, smp_processor_id()));
}
EXPORT_SYMBOL_GPL(fpregs_assert_state_consistent);
#endif

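/*
 * Mark current as the owner of the FPU registers on this CPU and clear
 * TIF_NEED_FPU_LOAD: the hardware registers now hold current's state,
 * so no reload is needed on the next return to userspace.
 */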
void fpregs_mark_activate(void)
{
	struct fpu *fpu = &current->thread.fpu;

	fpregs_activate(fpu);
	fpu->last_cpu = smp_processor_id();
	clear_thread_flag(TIF_NEED_FPU_LOAD);
}
EXPORT_SYMBOL_GPL(fpregs_mark_activate);

/*
 * x87 math exception handling:
 */

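/*
 * Worked example (illustrative): a task executes FDIV with a zero
 * divisor while the ZM mask bit (bit 2) of the control word is clear.
 * The CPU sets ZE (bit 2) in the status word and #MF is raised. Below,
 * err = swd & ~cwd then has bit 2 set, so the function returns
 * FPE_FLTDIV.
 */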
int fpu__exception_code(struct fpu *fpu, int trap_nr)
{
	int err;

	if (trap_nr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status.  0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit.  We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception.
		 */
		if (boot_cpu_has(X86_FEATURE_FXSR)) {
			cwd = fpu->state.fxsave.cwd;
			swd = fpu->state.fxsave.swd;
		} else {
			cwd = (unsigned short)fpu->state.fsave.cwd;
			swd = (unsigned short)fpu->state.fsave.swd;
		}

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register.  Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = MXCSR_DEFAULT;

		if (boot_cpu_has(X86_FEATURE_XMM))
			mxcsr = fpu->state.fxsave.mxcsr;

		err = ~(mxcsr >> 7) & mxcsr;
	}

	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		return FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		return FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		return FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		return FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		return FPE_FLTRES;
	}

	/*
	 * If we're using IRQ 13, or supposedly even some trap
	 * X86_TRAP_MF implementations, it's possible
	 * we get a spurious trap, which is not an error.
	 */
	return 0;
}