/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _FPU_INTERNAL_H
#define _FPU_INTERNAL_H

#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/slab.h>
#include <asm/asm.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/uaccess.h>
#include <asm/xsave.h>

extern unsigned int sig_xstate_size;
extern void fpu_init(void);

DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);

extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
				xstateregs_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
				 xstateregs_set;


/*
 * xstateregs_active == fpregs_active. Please refer to the comment
 * at the definition of fpregs_active.
 */
#define xstateregs_active	fpregs_active

extern struct _fpx_sw_bytes fx_sw_reserved;
#ifdef CONFIG_IA32_EMULATION
extern unsigned int sig_xstate_ia32_size;
extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
struct _fpstate_ia32;
struct _xstate_ia32;
extern int save_i387_xstate_ia32(void __user *buf);
extern int restore_i387_xstate_ia32(void __user *buf);
#endif

#ifdef CONFIG_MATH_EMULATION
extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif

#define X87_FSW_ES (1 << 7)	/* Exception Summary */

static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has(X86_FEATURE_FXSR);
}

extern void __sanitize_i387_state(struct task_struct *);

static inline void sanitize_i387_state(struct task_struct *tsk)
{
	if (!use_xsaveopt())
		return;
	__sanitize_i387_state(tsk);
}

#ifdef CONFIG_X86_64
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	int err;

	/* See comment in fpu_fxsave() below. */
#ifdef CONFIG_AS_FXSAVEQ
	asm volatile("1:  fxrstorq %[fx]\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:  movl $-1,%[err]\n"
		     "    jmp  2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err)
		     : [fx] "m" (*fx), "0" (0));
#else
	asm volatile("1:  rex64/fxrstor (%[fx])\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:  movl $-1,%[err]\n"
		     "    jmp  2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err)
		     : [fx] "R" (fx), "m" (*fx), "0" (0));
#endif
	return err;
}

static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
	int err;

	/*
	 * Clear the bytes not touched by the fxsave and reserved
	 * for the SW usage.
	 */
	err = __clear_user(&fx->sw_reserved,
			   sizeof(struct _fpx_sw_bytes));
	if (unlikely(err))
		return -EFAULT;

	/* See comment in fpu_fxsave() below. */
#ifdef CONFIG_AS_FXSAVEQ
	asm volatile("1:  fxsaveq %[fx]\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:  movl $-1,%[err]\n"
		     "    jmp  2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err), [fx] "=m" (*fx)
		     : "0" (0));
#else
	asm volatile("1:  rex64/fxsave (%[fx])\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:  movl $-1,%[err]\n"
		     "    jmp  2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err), "=m" (*fx)
		     : [fx] "R" (fx), "0" (0));
#endif
	if (unlikely(err) &&
	    __clear_user(fx, sizeof(struct i387_fxsave_struct)))
		err = -EFAULT;
	/* No need to clear here because the caller clears USED_MATH */
	return err;
}

static inline void fpu_fxsave(struct fpu *fpu)
{
	/* Using "rex64; fxsave %0" is broken because, if the memory operand
	   uses any extended registers for addressing, a second REX prefix
	   will be generated (to the assembler, rex64 followed by semicolon
	   is a separate instruction), and hence the 64-bitness is lost. */

#ifdef CONFIG_AS_FXSAVEQ
	/* Using "fxsaveq %0" would be the ideal choice, but is only supported
	   starting with gas 2.16. */
	__asm__ __volatile__("fxsaveq %0"
			     : "=m" (fpu->state->fxsave));
#else
	/* Using, as a workaround, the properly prefixed form below isn't
	   accepted by any binutils version so far released, complaining that
	   the same type of prefix is used twice if an extended register is
	   needed for addressing (fix submitted to mainline 2005-11-21).
	asm volatile("rex64/fxsave %0"
		     : "=m" (fpu->state->fxsave));
	   This, however, we can work around by forcing the compiler to select
	   an addressing mode that doesn't require extended registers. */
	asm volatile("rex64/fxsave (%[fx])"
		     : "=m" (fpu->state->fxsave)
		     : [fx] "R" (&fpu->state->fxsave));
#endif
}

#else  /* CONFIG_X86_32 */

/* perform fxrstor iff the processor has extended states, otherwise frstor */
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	/*
	 * The "nop" is needed to make the instructions the same
	 * length.
	 */
	alternative_input(
		"nop ; frstor %1",
		"fxrstor %1",
		X86_FEATURE_FXSR,
		"m" (*fx));

	return 0;
}

static inline void fpu_fxsave(struct fpu *fpu)
{
	asm volatile("fxsave %[fx]"
		     : [fx] "=m" (fpu->state->fxsave));
}

#endif	/* CONFIG_X86_64 */

/*
 * These must be called with preempt disabled. They return
 * 'true' if the FPU state is still intact.
 */
static inline int fpu_save_init(struct fpu *fpu)
{
	if (use_xsave()) {
		fpu_xsave(fpu);

		/*
		 * xsave header may indicate the init state of the FP.
		 */
		if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
			return 1;
	} else if (use_fxsr()) {
		fpu_fxsave(fpu);
	} else {
		asm volatile("fnsave %[fx]; fwait"
			     : [fx] "=m" (fpu->state->fsave));
		return 0;
	}

	/*
	 * If exceptions are pending, we need to clear them so
	 * that we don't randomly get exceptions later.
	 *
	 * FIXME! Is this perhaps only true for the old-style
	 * irq13 case? Maybe we could leave the x87 state
	 * intact otherwise?
	 */
	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
		asm volatile("fnclex");
		return 0;
	}
	return 1;
}
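
/*
 * Illustrative caller (a sketch only, not an API defined by this header;
 * the real users are the context-switch code and save_init_fpu() below).
 * It shows how the return value is meant to be read:
 *
 *	preempt_disable();
 *	if (__thread_has_fpu(tsk)) {
 *		if (fpu_save_init(&tsk->thread.fpu)) {
 *			... the registers still hold tsk's state, so a
 *			    later lazy restore on this CPU can be skipped ...
 *		} else {
 *			... the registers were clobbered (fnsave path or
 *			    pending exceptions); a full restore is needed
 *			    before they can be trusted again ...
 *		}
 *	}
 *	preempt_enable();
 */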

static inline int __save_init_fpu(struct task_struct *tsk)
{
	return fpu_save_init(&tsk->thread.fpu);
}

static inline int fpu_fxrstor_checking(struct fpu *fpu)
{
	return fxrstor_checking(&fpu->state->fxsave);
}

static inline int fpu_restore_checking(struct fpu *fpu)
{
	if (use_xsave())
		return fpu_xrstor_checking(fpu);
	else
		return fpu_fxrstor_checking(fpu);
}

static inline int restore_fpu_checking(struct task_struct *tsk)
{
	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
	   is pending.  Clear the x87 state here by setting it to fixed
	   values. "m" is a random variable that should be in L1 */
	alternative_input(
		ASM_NOP8 ASM_NOP2,
		"emms\n\t"		/* clear stack tags */
		"fildl %P[addr]",	/* set F?P to defined value */
		X86_FEATURE_FXSAVE_LEAK,
		[addr] "m" (tsk->thread.fpu.has_fpu));

	return fpu_restore_checking(&tsk->thread.fpu);
}

/*
 * Software FPU state helpers. Careful: these need
 * preemption protection *and* they need to be
 * properly paired with the CR0.TS changes!
 */
static inline int __thread_has_fpu(struct task_struct *tsk)
{
	return tsk->thread.fpu.has_fpu;
}

/* Must be paired with an 'stts' after! */
static inline void __thread_clear_has_fpu(struct task_struct *tsk)
{
	tsk->thread.fpu.has_fpu = 0;
	percpu_write(fpu_owner_task, NULL);
}

/* Must be paired with a 'clts' before! */
static inline void __thread_set_has_fpu(struct task_struct *tsk)
{
	tsk->thread.fpu.has_fpu = 1;
	percpu_write(fpu_owner_task, tsk);
}

/*
 * Encapsulate the CR0.TS handling together with the
 * software flag.
 *
 * These generally need preemption protection to work;
 * try to avoid using them on their own.
 */
static inline void __thread_fpu_end(struct task_struct *tsk)
{
	__thread_clear_has_fpu(tsk);
	stts();
}

static inline void __thread_fpu_begin(struct task_struct *tsk)
{
	clts();
	__thread_set_has_fpu(tsk);
}
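
/*
 * Hypothetical illustration of the pairing rules above (a sketch only,
 * not an in-tree user): the begin/end helpers bracket the FPU use and
 * must run with preemption off, otherwise CR0.TS and the has_fpu flag
 * can get out of sync across a reschedule:
 *
 *	preempt_disable();
 *	__thread_fpu_begin(tsk);	... clts() + has_fpu = 1 ...
 *	... use the FPU/SSE registers ...
 *	__thread_fpu_end(tsk);		... has_fpu = 0 + stts() ...
 *	preempt_enable();
 */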

/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state and
 *    sets the new state of the CR0.TS bit. This is
 *    done within the context of the old process.
 *
 *  - switch_fpu_finish() restores the new state as
 *    necessary.
 */
typedef struct { int preload; } fpu_switch_t;

/*
 * Must be run with preemption disabled: this clears the fpu_owner_task
 * on this CPU.
 *
 * This will disable any lazy FPU state restore of the current FPU state,
 * but if the current thread owns the FPU, its state will still be saved
 * on the next context switch.
 */
static inline void __cpu_disable_lazy_restore(unsigned int cpu)
{
	per_cpu(fpu_owner_task, cpu) = NULL;
}

static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
{
	return new == percpu_read_stable(fpu_owner_task) &&
		cpu == new->thread.fpu.last_cpu;
}

static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
{
	fpu_switch_t fpu;

	fpu.preload = tsk_used_math(new) && new->fpu_counter > 5;
	if (__thread_has_fpu(old)) {
		if (!__save_init_fpu(old))
			cpu = ~0;
		old->thread.fpu.last_cpu = cpu;
		old->thread.fpu.has_fpu = 0;	/* But leave fpu_owner_task! */

		/* Don't change CR0.TS if we just switch! */
		if (fpu.preload) {
			new->fpu_counter++;
			__thread_set_has_fpu(new);
			prefetch(new->thread.fpu.state);
		} else
			stts();
	} else {
		old->fpu_counter = 0;
		old->thread.fpu.last_cpu = ~0;
		if (fpu.preload) {
			new->fpu_counter++;
			if (fpu_lazy_restore(new, cpu))
				fpu.preload = 0;
			else
				prefetch(new->thread.fpu.state);
			__thread_fpu_begin(new);
		}
	}
	return fpu;
}

/*
 * By the time this gets called, we've already cleared CR0.TS and
 * given the process the FPU if we are going to preload the FPU
 * state - all we need to do is to conditionally restore the register
 * state itself.
 */
static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
{
	if (fpu.preload) {
		if (unlikely(restore_fpu_checking(new)))
			__thread_fpu_end(new);
	}
}
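
/*
 * Usage sketch for the two-stage switch above (illustrative only; the
 * real caller is the architecture context-switch code, e.g. __switch_to()):
 *
 *	fpu_switch_t fpu_switch;
 *
 *	fpu_switch = switch_fpu_prepare(prev_p, next_p, cpu);
 *	... switch the rest of the thread state ...
 *	switch_fpu_finish(next_p, fpu_switch);
 *
 * switch_fpu_prepare() runs in the context of the old task and saves its
 * state if needed; switch_fpu_finish() then restores the new task's
 * registers only if fpu_switch.preload was set.
 */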

/*
 * Signal frame handlers...
 */
extern int save_i387_xstate(void __user *buf);
extern int restore_i387_xstate(void __user *buf);

static inline void __clear_fpu(struct task_struct *tsk)
{
	if (__thread_has_fpu(tsk)) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		__thread_fpu_end(tsk);
	}
}

/*
 * The actual user_fpu_begin/end() functions
 * need to be preemption-safe.
 *
 * NOTE! user_fpu_end() must be used only after you
 * have saved the FP state, and user_fpu_begin() must
 * be used only immediately before restoring it.
 * These functions do not do any save/restore on
 * their own.
 */
static inline void user_fpu_end(void)
{
	preempt_disable();
	__thread_fpu_end(current);
	preempt_enable();
}

static inline void user_fpu_begin(void)
{
	preempt_disable();
	if (!user_has_fpu())
		__thread_fpu_begin(current);
	preempt_enable();
}
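
/*
 * Sketch of the rule stated above (purely illustrative; the save and
 * restore steps stand for whatever actually dumps or loads the FP
 * image, e.g. the signal frame handlers declared above):
 *
 *	save path:	... save the FP state to the signal frame ...
 *			user_fpu_end();
 *
 *	restore path:	user_fpu_begin();
 *			... restore the FP state from the signal frame ...
 *
 * Neither helper saves or restores anything itself; they only flip
 * CR0.TS and the has_fpu bookkeeping under preempt_disable().
 */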

/*
 * These disable preemption on their own and are safe
 */
static inline void save_init_fpu(struct task_struct *tsk)
{
	WARN_ON_ONCE(!__thread_has_fpu(tsk));
	preempt_disable();
	__save_init_fpu(tsk);
	__thread_fpu_end(tsk);
	preempt_enable();
}

static inline void clear_fpu(struct task_struct *tsk)
{
	preempt_disable();
	__clear_fpu(tsk);
	preempt_enable();
}

/*
 * i387 state interaction
 */
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.cwd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
	}
}

static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.swd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.swd;
	}
}

static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
	if (cpu_has_xmm) {
		return tsk->thread.fpu.state->fxsave.mxcsr;
	} else {
		return MXCSR_DEFAULT;
	}
}

static bool fpu_allocated(struct fpu *fpu)
{
	return fpu->state != NULL;
}

static inline int fpu_alloc(struct fpu *fpu)
{
	if (fpu_allocated(fpu))
		return 0;
	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
	if (!fpu->state)
		return -ENOMEM;
	WARN_ON((unsigned long)fpu->state & 15);
	return 0;
}

static inline void fpu_free(struct fpu *fpu)
{
	if (fpu->state) {
		kmem_cache_free(task_xstate_cachep, fpu->state);
		fpu->state = NULL;
	}
}

static inline void fpu_copy(struct fpu *dst, struct fpu *src)
{
	memcpy(dst->state, src->state, xstate_size);
}

extern void fpu_finit(struct fpu *fpu);

#endif