/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu/internal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/types.h>
#include <asm/traps.h>

#include <linux/hardirq.h>
#include <linux/pkeys.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/fpu.h>

/*
 * Represents the initial FPU state. It's mostly (but not completely) zeroes,
 * depending on the FPU hardware format:
 */
union fpregs_state init_fpstate __read_mostly;

/*
 * Track whether the kernel is using the FPU state
 * currently.
 *
 * This flag is used:
 *
 *   - by IRQ context code to potentially use the FPU
 *     if it's unused.
 *
 *   - to debug kernel_fpu_begin()/end() correctness
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);

/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

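/*
 * Illustrative note: together with fpu->last_cpu, this per-CPU pointer
 * lets the context-switch code decide whether the registers on this CPU
 * still belong to a given task; a check of the form
 *
 *	fpu == this_cpu_read(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu
 *
 * means the in-register state is still valid and a restore can be skipped.
 */
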
static void kernel_fpu_disable(void)
{
	WARN_ON_FPU(this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, true);
}

static void kernel_fpu_enable(void)
{
	WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, false);
}

static bool kernel_fpu_disabled(void)
{
	return this_cpu_read(in_kernel_fpu);
}

/*
 * Were we in an interrupt that interrupted kernel mode?
 *
 * We can do a kernel_fpu_begin/end() pair *ONLY* if that
 * pair does nothing at all: the thread must not have fpu (so
 * that we don't try to save the FPU state), and TS must
 * be set (so that the clts/stts pair does nothing that is
 * visible in the interrupted kernel thread).
 *
 * Except for the eagerfpu case when we return true; in the likely case
 * the thread has FPU but we are not going to set/clear TS.
 */
static bool interrupted_kernel_fpu_idle(void)
{
	if (kernel_fpu_disabled())
		return false;

	if (use_eager_fpu())
		return true;

	return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
}

/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();

	return regs && user_mode(regs);
}

/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);
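
/*
 * Usage sketch (illustrative): callers that may run in IRQ context
 * typically check irq_fpu_usable() and then bracket their SIMD work
 * with kernel_fpu_begin()/kernel_fpu_end():
 *
 *	if (irq_fpu_usable()) {
 *		kernel_fpu_begin();
 *		... use SSE/AVX instructions on kernel data ...
 *		kernel_fpu_end();
 *	}
 */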

void __kernel_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	WARN_ON_FPU(!irq_fpu_usable());

	kernel_fpu_disable();

	if (fpu->fpregs_active) {
		/*
		 * Ignore return value -- we don't care if reg state
		 * is clobbered.
		 */
		copy_fpregs_to_fpstate(fpu);
	} else {
		this_cpu_write(fpu_fpregs_owner_ctx, NULL);
		__fpregs_activate_hw();
	}
}
EXPORT_SYMBOL(__kernel_fpu_begin);

void __kernel_fpu_end(void)
{
	struct fpu *fpu = &current->thread.fpu;

	if (fpu->fpregs_active)
		copy_kernel_to_fpregs(&fpu->state);
	else
		__fpregs_deactivate_hw();

	kernel_fpu_enable();
}
EXPORT_SYMBOL(__kernel_fpu_end);

void kernel_fpu_begin(void)
{
	preempt_disable();
	__kernel_fpu_begin();
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin);

void kernel_fpu_end(void)
{
	__kernel_fpu_end();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);

/*
 * CR0::TS save/restore functions:
 */
int irq_ts_save(void)
{
	/*
	 * If in process context and not atomic, we can take a spurious DNA fault.
	 * Otherwise, doing clts() in process context requires disabling preemption
	 * or some heavy lifting like kernel_fpu_begin()
	 */
	if (!in_atomic())
		return 0;

	if (read_cr0() & X86_CR0_TS) {
		clts();
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_ts_save);

void irq_ts_restore(int TS_state)
{
	if (TS_state)
		stts();
}
EXPORT_SYMBOL_GPL(irq_ts_restore);
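
/*
 * Usage sketch (illustrative): code that executes FPU-touching
 * instructions from atomic context without the full
 * kernel_fpu_begin()/end() machinery (e.g. the VIA PadLock drivers)
 * typically wraps the work like this:
 *
 *	int ts = irq_ts_save();
 *	... instructions that must not fault on CR0.TS ...
 *	irq_ts_restore(ts);
 */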

/*
 * Save the FPU state (mark it for reload if necessary):
 *
 * This only ever gets called for the current task.
 */
void fpu__save(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	preempt_disable();
	trace_x86_fpu_before_save(fpu);
	if (fpu->fpregs_active) {
		if (!copy_fpregs_to_fpstate(fpu)) {
			if (use_eager_fpu())
				copy_kernel_to_fpregs(&fpu->state);
			else
				fpregs_deactivate(fpu);
		}
	}
	trace_x86_fpu_after_save(fpu);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(fpu__save);

/*
 * Legacy x87 fpstate state init:
 */
static inline void fpstate_init_fstate(struct fregs_state *fp)
{
	fp->cwd = 0xffff037fu;
	fp->swd = 0xffff0000u;
	fp->twd = 0xffffffffu;
	fp->fos = 0xffff0000u;
}

void fpstate_init(union fpregs_state *state)
{
	if (!static_cpu_has(X86_FEATURE_FPU)) {
		fpstate_init_soft(&state->soft);
		return;
	}

	memset(state, 0, fpu_kernel_xstate_size);

	/*
	 * XRSTORS requires that this bit is set in xcomp_bv, or
	 * it will #GP. Make sure it is replaced after the memset().
	 */
	if (static_cpu_has(X86_FEATURE_XSAVES))
		state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT |
					       xfeatures_mask;

	if (static_cpu_has(X86_FEATURE_FXSR))
		fpstate_init_fxstate(&state->fxsave);
	else
		fpstate_init_fstate(&state->fsave);
}
EXPORT_SYMBOL_GPL(fpstate_init);

int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
	dst_fpu->counter = 0;
	dst_fpu->fpregs_active = 0;
	dst_fpu->last_cpu = -1;

	if (!src_fpu->fpstate_active || !static_cpu_has(X86_FEATURE_FPU))
		return 0;

	WARN_ON_FPU(src_fpu != &current->thread.fpu);

	/*
	 * Don't let 'init optimized' areas of the XSAVE area
	 * leak into the child task:
	 */
	if (use_eager_fpu())
		memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);

	/*
	 * Save current FPU registers directly into the child
	 * FPU context, without any memory-to-memory copying.
	 * In lazy mode, if the FPU context isn't loaded into
	 * fpregs, CR0.TS will be set and do_device_not_available
	 * will load the FPU context.
	 *
	 * We have to do all this with preemption disabled,
	 * mostly because of the FNSAVE case, because in that
	 * case we must not allow preemption in the window
	 * between the FNSAVE and us marking the context lazy.
	 *
	 * It shouldn't be an issue as even FNSAVE is plenty
	 * fast in terms of critical section length.
	 */
	preempt_disable();
	if (!copy_fpregs_to_fpstate(dst_fpu)) {
		memcpy(&src_fpu->state, &dst_fpu->state,
		       fpu_kernel_xstate_size);

		if (use_eager_fpu())
			copy_kernel_to_fpregs(&src_fpu->state);
		else
			fpregs_deactivate(src_fpu);
	}
	preempt_enable();

	trace_x86_fpu_copy_src(src_fpu);
	trace_x86_fpu_copy_dst(dst_fpu);

	return 0;
}

/*
 * Activate the current task's in-memory FPU context,
 * if it has not been used before:
 */
void fpu__activate_curr(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu);

	if (!fpu->fpstate_active) {
		fpstate_init(&fpu->state);
		trace_x86_fpu_init_state(fpu);

		trace_x86_fpu_activate_state(fpu);
		/* Safe to do for the current task: */
		fpu->fpstate_active = 1;
	}
}
EXPORT_SYMBOL_GPL(fpu__activate_curr);

/*
 * This function must be called before we read a task's fpstate.
 *
 * If the task has not used the FPU before then initialize its
 * fpstate.
 *
 * If the task has used the FPU before then save it.
 */
void fpu__activate_fpstate_read(struct fpu *fpu)
{
	/*
	 * If fpregs are active (in the current CPU), then
	 * copy them to the fpstate:
	 */
	if (fpu->fpregs_active) {
		fpu__save(fpu);
	} else {
		if (!fpu->fpstate_active) {
			fpstate_init(&fpu->state);
			trace_x86_fpu_init_state(fpu);

			trace_x86_fpu_activate_state(fpu);
			/* Safe to do for current and for stopped child tasks: */
			fpu->fpstate_active = 1;
		}
	}
}

/*
 * This function must be called before we write a task's fpstate.
 *
 * If the task has used the FPU before then unlazy it.
 * If the task has not used the FPU before then initialize its fpstate.
 *
 * After this function call, after registers in the fpstate are
 * modified and the child task has woken up, the child task will
 * restore the modified FPU state from the modified context. If we
 * didn't clear its lazy status here then the lazy in-registers
 * state pending on its former CPU could be restored, corrupting
 * the modifications.
 */
void fpu__activate_fpstate_write(struct fpu *fpu)
{
	/*
	 * Only stopped child tasks can be used to modify the FPU
	 * state in the fpstate buffer:
	 */
	WARN_ON_FPU(fpu == &current->thread.fpu);

	if (fpu->fpstate_active) {
		/* Invalidate any lazy state: */
		fpu->last_cpu = -1;
	} else {
		fpstate_init(&fpu->state);
		trace_x86_fpu_init_state(fpu);

		trace_x86_fpu_activate_state(fpu);
		/* Safe to do for stopped child tasks: */
		fpu->fpstate_active = 1;
	}
}

/*
 * This function must be called before we write the current
 * task's fpstate.
 *
 * This call gets the current FPU register state and moves
 * it into the 'fpstate'. Preemption is disabled so that
 * no writes to the 'fpstate' can occur from context
 * switches.
 *
 * Must be followed by a fpu__current_fpstate_write_end().
 */
void fpu__current_fpstate_write_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	/*
	 * Ensure that the context-switching code does not write
	 * over the fpstate while we are doing our update.
	 */
	preempt_disable();

	/*
	 * Move the fpregs into the fpu's 'fpstate'.
	 */
	fpu__activate_fpstate_read(fpu);

	/*
	 * The caller is about to write to 'fpu'. Ensure that no
	 * CPU thinks that its fpregs match the fpstate. This
	 * ensures we will not be lazy and skip a XRSTOR in the
	 * future.
	 */
	fpu->last_cpu = -1;
}

/*
 * This function must be paired with fpu__current_fpstate_write_begin()
 *
 * This will ensure that the modified fpstate gets placed back in
 * the fpregs if necessary.
 *
 * Note: This function may be called whether or not an _actual_
 * write to the fpstate occurred.
 */
void fpu__current_fpstate_write_end(void)
{
	struct fpu *fpu = &current->thread.fpu;

	/*
	 * 'fpu' now has an updated copy of the state, but the
	 * registers may still be out of date. Update them with
	 * an XRSTOR if they are active.
	 */
	if (fpregs_active())
		copy_kernel_to_fpregs(&fpu->state);

	/*
	 * Our update is done and the fpregs/fpstate are in sync
	 * if necessary. Context switches can happen again.
	 */
	preempt_enable();
}
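
/*
 * Usage sketch (illustrative): a caller modifying the current task's
 * saved FPU state in place pairs the two helpers above:
 *
 *	fpu__current_fpstate_write_begin();
 *	... update fields in current->thread.fpu.state ...
 *	fpu__current_fpstate_write_end();
 */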

/*
 * 'fpu__restore()' is called to copy FPU registers from
 * the FPU fpstate to the live hw registers and to activate
 * access to the hardware registers, so that FPU instructions
 * can be used afterwards.
 *
 * Must be called with kernel preemption disabled (for example
 * with local interrupts disabled, as it is in the case of
 * do_device_not_available()).
 */
void fpu__restore(struct fpu *fpu)
{
	fpu__activate_curr(fpu);

	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
	kernel_fpu_disable();
	trace_x86_fpu_before_restore(fpu);
	fpregs_activate(fpu);
	copy_kernel_to_fpregs(&fpu->state);
	fpu->counter++;
	trace_x86_fpu_after_restore(fpu);
	kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(fpu__restore);

/*
 * Drops current FPU state: deactivates the fpregs and
 * the fpstate. NOTE: it still leaves previous contents
 * in the fpregs in the eager-FPU case.
 *
 * This function can be used in cases where we know that
 * a state-restore is coming: either an explicit one,
 * or a reschedule.
 */
void fpu__drop(struct fpu *fpu)
{
	preempt_disable();
	fpu->counter = 0;

	if (fpu->fpregs_active) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		fpregs_deactivate(fpu);
	}

	fpu->fpstate_active = 0;

	trace_x86_fpu_dropped(fpu);

	preempt_enable();
}

/*
 * Clear FPU registers by setting them up from
 * the init fpstate:
 */
static inline void copy_init_fpstate_to_fpregs(void)
{
	if (use_xsave())
		copy_kernel_to_xregs(&init_fpstate.xsave, -1);
	else if (static_cpu_has(X86_FEATURE_FXSR))
		copy_kernel_to_fxregs(&init_fpstate.fxsave);
	else
		copy_kernel_to_fregs(&init_fpstate.fsave);

	if (boot_cpu_has(X86_FEATURE_OSPKE))
		copy_init_pkru_to_fpregs();
}

/*
 * Clear the FPU state back to init state.
 *
 * Called by sys_execve(), by the signal handler code and by various
 * error paths.
 */
void fpu__clear(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */

	fpu__drop(fpu);

	/*
	 * Make sure fpstate is cleared and initialized.
	 */
	if (static_cpu_has(X86_FEATURE_FPU)) {
		fpu__activate_curr(fpu);
		user_fpu_begin();
		copy_init_fpstate_to_fpregs();
	}
}

/*
 * x87 math exception handling:
 */

int fpu__exception_code(struct fpu *fpu, int trap_nr)
{
	int err;

	if (trap_nr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status. 0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit. We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception.
		 */
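		/*
		 * Worked example (illustrative): with the default control
		 * word 0x037f every exception is masked, so swd & ~cwd is
		 * zero in the low six bits and we fall through to the
		 * "spurious trap" return at the end. If a program unmasks
		 * divide-by-zero (clears bit 2 of cwd) and then divides by
		 * zero (bit 2 set in swd), err has 0x004 set and we return
		 * FPE_FLTDIV.
		 */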
		if (boot_cpu_has(X86_FEATURE_FXSR)) {
			cwd = fpu->state.fxsave.cwd;
			swd = fpu->state.fxsave.swd;
		} else {
			cwd = (unsigned short)fpu->state.fsave.cwd;
			swd = (unsigned short)fpu->state.fsave.swd;
		}

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register. Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = MXCSR_DEFAULT;

		if (boot_cpu_has(X86_FEATURE_XMM))
			mxcsr = fpu->state.fxsave.mxcsr;

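		/*
		 * Worked example (illustrative): the mask bits live at
		 * 0x1f80 and the flag bits at 0x3f. Shifting the mask bits
		 * right by 7 lines them up with the flags, so the default
		 * MXCSR value 0x1f80 (everything masked) yields err == 0
		 * and a spurious trap. With overflow unmasked (bit 10
		 * clear) and the OF flag set (bit 3), err has 0x008 set
		 * and we return FPE_FLTOVF.
		 */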
		err = ~(mxcsr >> 7) & mxcsr;
	}

	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		return FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		return FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		return FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		return FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		return FPE_FLTRES;
	}

	/*
	 * If we're using IRQ 13, or supposedly even some trap
	 * X86_TRAP_MF implementations, it's possible
	 * we get a spurious trap, which is not an error.
	 */
	return 0;
}