1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * xsave/xrstor support.
4  *
5  * Author: Suresh Siddha <suresh.b.siddha@intel.com>
6  */
7 #include <linux/compat.h>
8 #include <linux/cpu.h>
9 #include <linux/mman.h>
10 #include <linux/pkeys.h>
11 #include <linux/seq_file.h>
12 #include <linux/proc_fs.h>
13 
14 #include <asm/fpu/api.h>
15 #include <asm/fpu/internal.h>
16 #include <asm/fpu/signal.h>
17 #include <asm/fpu/regset.h>
18 #include <asm/fpu/xstate.h>
19 
20 #include <asm/tlbflush.h>
21 #include <asm/cpufeature.h>
22 
23 /*
24  * Although we spell it out in here, the Processor Trace
25  * xfeature is completely unused.  We use other mechanisms
26  * to save/restore PT state in Linux.
27  */
28 static const char *xfeature_names[] =
29 {
30 	"x87 floating point registers"	,
31 	"SSE registers"			,
32 	"AVX registers"			,
33 	"MPX bounds registers"		,
34 	"MPX CSR"			,
35 	"AVX-512 opmask"		,
36 	"AVX-512 Hi256"			,
37 	"AVX-512 ZMM_Hi256"		,
38 	"Processor Trace (unused)"	,
39 	"Protection Keys User registers",
40 	"PASID state",
41 	"unknown xstate feature"	,
42 };
43 
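/*
 * Map each xfeature number to the CPUID feature bit that must also be
 * set for that xstate component to stay enabled; used at boot to clear
 * xfeatures whose base CPU feature is disabled.
 */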
44 static short xsave_cpuid_features[] __initdata = {
45 	X86_FEATURE_FPU,
46 	X86_FEATURE_XMM,
47 	X86_FEATURE_AVX,
48 	X86_FEATURE_MPX,
49 	X86_FEATURE_MPX,
50 	X86_FEATURE_AVX512F,
51 	X86_FEATURE_AVX512F,
52 	X86_FEATURE_AVX512F,
53 	X86_FEATURE_INTEL_PT,
54 	X86_FEATURE_PKU,
55 	X86_FEATURE_ENQCMD,
56 };
57 
58 /*
59  * This represents the full set of bits that should ever be set in a kernel
60  * XSAVE buffer, both supervisor and user xstates.
61  */
62 u64 xfeatures_mask_all __ro_after_init;
63 EXPORT_SYMBOL_GPL(xfeatures_mask_all);
64 
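/*
 * Per-xfeature offset and size tables, filled in from CPUID at boot.
 * Entries stay at -1 for components that are not enabled.
 */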
65 static unsigned int xstate_offsets[XFEATURE_MAX] __ro_after_init =
66 	{ [ 0 ... XFEATURE_MAX - 1] = -1};
67 static unsigned int xstate_sizes[XFEATURE_MAX] __ro_after_init =
68 	{ [ 0 ... XFEATURE_MAX - 1] = -1};
69 static unsigned int xstate_comp_offsets[XFEATURE_MAX] __ro_after_init =
70 	{ [ 0 ... XFEATURE_MAX - 1] = -1};
71 static unsigned int xstate_supervisor_only_offsets[XFEATURE_MAX] __ro_after_init =
72 	{ [ 0 ... XFEATURE_MAX - 1] = -1};
73 
74 /*
75  * The XSAVE area of kernel can be in standard or compacted format;
76  * it is always in standard format for user mode. This is the user
77  * mode standard format size used for signal and ptrace frames.
78  */
79 unsigned int fpu_user_xstate_size __ro_after_init;
80 
81 /*
82  * Return whether the system supports a given xfeature.
83  *
84  * Also return the name of the (most advanced) feature that the caller requested:
85  */
86 int cpu_has_xfeatures(u64 xfeatures_needed, const char **feature_name)
87 {
88 	u64 xfeatures_missing = xfeatures_needed & ~xfeatures_mask_all;
89 
90 	if (unlikely(feature_name)) {
91 		long xfeature_idx, max_idx;
92 		u64 xfeatures_print;
93 		/*
94 		 * We use fls64() here to be able to print the most advanced
95 		 * feature that was requested but is missing. So if a driver
96 		 * asks about "XFEATURE_MASK_SSE | XFEATURE_MASK_YMM" we'll print the
97 		 * missing AVX feature - this is the most informative message
98 		 * to users:
99 		 */
100 		if (xfeatures_missing)
101 			xfeatures_print = xfeatures_missing;
102 		else
103 			xfeatures_print = xfeatures_needed;
104 
105 		xfeature_idx = fls64(xfeatures_print)-1;
106 		max_idx = ARRAY_SIZE(xfeature_names)-1;
107 		xfeature_idx = min(xfeature_idx, max_idx);
108 
109 		*feature_name = xfeature_names[xfeature_idx];
110 	}
111 
112 	if (xfeatures_missing)
113 		return 0;
114 
115 	return 1;
116 }
117 EXPORT_SYMBOL_GPL(cpu_has_xfeatures);
118 
119 static bool xfeature_is_supervisor(int xfeature_nr)
120 {
121 	/*
122 	 * Extended State Enumeration Sub-leaves (EAX = 0DH, ECX = n, n > 1)
123 	 * returns ECX[0] set to (1) for a supervisor state, and cleared (0)
124 	 * for a user state.
125 	 */
126 	u32 eax, ebx, ecx, edx;
127 
128 	cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
129 	return ecx & 1;
130 }
131 
132 /*
133  * Enable the extended processor state save/restore feature.
134  * Called once per CPU onlining.
135  */
136 void fpu__init_cpu_xstate(void)
137 {
138 	if (!boot_cpu_has(X86_FEATURE_XSAVE) || !xfeatures_mask_all)
139 		return;
140 
141 	cr4_set_bits(X86_CR4_OSXSAVE);
142 
143 	/*
144 	 * XCR_XFEATURE_ENABLED_MASK (aka. XCR0) sets user features
145 	 * managed by XSAVE{C, OPT, S} and XRSTOR{S}.  Only XSAVE user
146 	 * states can be set here.
147 	 */
148 	xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask_uabi());
149 
150 	/*
151 	 * MSR_IA32_XSS sets supervisor states managed by XSAVES.
152 	 */
153 	if (boot_cpu_has(X86_FEATURE_XSAVES)) {
154 		wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() |
155 				     xfeatures_mask_independent());
156 	}
157 }
158 
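/*
 * Return true if @xfeature is enabled in the full kernel feature mask
 * (xfeatures_mask_all).
 */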
159 static bool xfeature_enabled(enum xfeature xfeature)
160 {
161 	return xfeatures_mask_all & BIT_ULL(xfeature);
162 }
163 
164 /*
165  * Record the offsets and sizes of various xstates contained
166  * in the XSAVE state memory layout.
167  */
168 static void __init setup_xstate_features(void)
169 {
170 	u32 eax, ebx, ecx, edx, i;
171 	/* start at the beginning of the "extended state" */
172 	unsigned int last_good_offset = offsetof(struct xregs_state,
173 						 extended_state_area);
174 	/*
175 	 * The FP xstates and SSE xstates are legacy states. They are always
176 	 * in the fixed offsets in the xsave area in either compacted form
177 	 * or standard form.
178 	 */
179 	xstate_offsets[XFEATURE_FP]	= 0;
180 	xstate_sizes[XFEATURE_FP]	= offsetof(struct fxregs_state,
181 						   xmm_space);
182 
183 	xstate_offsets[XFEATURE_SSE]	= xstate_sizes[XFEATURE_FP];
184 	xstate_sizes[XFEATURE_SSE]	= sizeof_field(struct fxregs_state,
185 						       xmm_space);
186 
187 	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
188 		if (!xfeature_enabled(i))
189 			continue;
190 
191 		cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
192 
193 		xstate_sizes[i] = eax;
194 
195 		/*
196 		 * If an xfeature is supervisor state, the offset in EBX is
197 		 * invalid, leave it to -1.
198 		 */
199 		if (xfeature_is_supervisor(i))
200 			continue;
201 
202 		xstate_offsets[i] = ebx;
203 
204 		/*
205 		 * In our xstate size checks, we assume that the highest-numbered
206 		 * xstate feature has the highest offset in the buffer.  Ensure
207 		 * it does.
208 		 */
209 		WARN_ONCE(last_good_offset > xstate_offsets[i],
210 			  "x86/fpu: misordered xstate at %d\n", last_good_offset);
211 
212 		last_good_offset = xstate_offsets[i];
213 	}
214 }
215 
216 static void __init print_xstate_feature(u64 xstate_mask)
217 {
218 	const char *feature_name;
219 
220 	if (cpu_has_xfeatures(xstate_mask, &feature_name))
221 		pr_info("x86/fpu: Supporting XSAVE feature 0x%03Lx: '%s'\n", xstate_mask, feature_name);
222 }
223 
224 /*
225  * Print out all the supported xstate features:
226  */
227 static void __init print_xstate_features(void)
228 {
229 	print_xstate_feature(XFEATURE_MASK_FP);
230 	print_xstate_feature(XFEATURE_MASK_SSE);
231 	print_xstate_feature(XFEATURE_MASK_YMM);
232 	print_xstate_feature(XFEATURE_MASK_BNDREGS);
233 	print_xstate_feature(XFEATURE_MASK_BNDCSR);
234 	print_xstate_feature(XFEATURE_MASK_OPMASK);
235 	print_xstate_feature(XFEATURE_MASK_ZMM_Hi256);
236 	print_xstate_feature(XFEATURE_MASK_Hi16_ZMM);
237 	print_xstate_feature(XFEATURE_MASK_PKRU);
238 	print_xstate_feature(XFEATURE_MASK_PASID);
239 }
240 
241 /*
242  * This check is important because it is easy to get XSTATE_*
243  * confused with XSTATE_BIT_*.
244  */
245 #define CHECK_XFEATURE(nr) do {		\
246 	WARN_ON(nr < FIRST_EXTENDED_XFEATURE);	\
247 	WARN_ON(nr >= XFEATURE_MAX);	\
248 } while (0)
249 
250 /*
251  * We could cache this like xstate_size[], but we only use
252  * it here, so it would be a waste of space.
253  */
254 static int xfeature_is_aligned(int xfeature_nr)
255 {
256 	u32 eax, ebx, ecx, edx;
257 
258 	CHECK_XFEATURE(xfeature_nr);
259 
260 	if (!xfeature_enabled(xfeature_nr)) {
261 		WARN_ONCE(1, "Checking alignment of disabled xfeature %d\n",
262 			  xfeature_nr);
263 		return 0;
264 	}
265 
266 	cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
267 	/*
268 	 * The value returned by ECX[1] indicates the alignment
269 	 * of state component 'i' when the compacted format
270 	 * of the extended region of an XSAVE area is used:
271 	 */
272 	return !!(ecx & 2);
273 }
274 
275 /*
276  * This function sets up offsets and sizes of all extended states in
277  * xsave area. This supports both standard format and compacted format
278  * of the xsave area.
279  */
280 static void __init setup_xstate_comp_offsets(void)
281 {
282 	unsigned int next_offset;
283 	int i;
284 
285 	/*
286 	 * The FP xstates and SSE xstates are legacy states. They are always
287 	 * in the fixed offsets in the xsave area in either compacted form
288 	 * or standard form.
289 	 */
290 	xstate_comp_offsets[XFEATURE_FP] = 0;
291 	xstate_comp_offsets[XFEATURE_SSE] = offsetof(struct fxregs_state,
292 						     xmm_space);
293 
294 	if (!boot_cpu_has(X86_FEATURE_XSAVES)) {
295 		for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
296 			if (xfeature_enabled(i))
297 				xstate_comp_offsets[i] = xstate_offsets[i];
298 		}
299 		return;
300 	}
301 
302 	next_offset = FXSAVE_SIZE + XSAVE_HDR_SIZE;
303 
304 	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
305 		if (!xfeature_enabled(i))
306 			continue;
307 
308 		if (xfeature_is_aligned(i))
309 			next_offset = ALIGN(next_offset, 64);
310 
311 		xstate_comp_offsets[i] = next_offset;
312 		next_offset += xstate_sizes[i];
313 	}
314 }
315 
316 /*
317  * Setup offsets of a supervisor-state-only XSAVES buffer:
318  *
319  * The offsets stored in xstate_comp_offsets[] only work for one specific
320  * value of the Requested Feature BitMap (RFBM).  In cases where a different
321  * RFBM value is used, a different set of offsets is required.  This set of
322  * offsets is for when RFBM=xfeatures_mask_supervisor().
323  */
324 static void __init setup_supervisor_only_offsets(void)
325 {
326 	unsigned int next_offset;
327 	int i;
328 
329 	next_offset = FXSAVE_SIZE + XSAVE_HDR_SIZE;
330 
331 	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
332 		if (!xfeature_enabled(i) || !xfeature_is_supervisor(i))
333 			continue;
334 
335 		if (xfeature_is_aligned(i))
336 			next_offset = ALIGN(next_offset, 64);
337 
338 		xstate_supervisor_only_offsets[i] = next_offset;
339 		next_offset += xstate_sizes[i];
340 	}
341 }
342 
343 /*
344  * Print out xstate component offsets and sizes
345  */
346 static void __init print_xstate_offset_size(void)
347 {
348 	int i;
349 
350 	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
351 		if (!xfeature_enabled(i))
352 			continue;
353 		pr_info("x86/fpu: xstate_offset[%d]: %4d, xstate_sizes[%d]: %4d\n",
354 			 i, xstate_comp_offsets[i], i, xstate_sizes[i]);
355 	}
356 }
357 
358 /*
359  * All supported features have either init state all zeros or are
360  * handled in setup_init_fpu_buf() individually. This is an explicit
361  * feature list and does not use XFEATURE_MASK*SUPPORTED to catch
362  * newly added supported features at build time and make people
363  * actually look at the init state for the new feature.
364  */
365 #define XFEATURES_INIT_FPSTATE_HANDLED		\
366 	(XFEATURE_MASK_FP |			\
367 	 XFEATURE_MASK_SSE |			\
368 	 XFEATURE_MASK_YMM |			\
369 	 XFEATURE_MASK_OPMASK |			\
370 	 XFEATURE_MASK_ZMM_Hi256 |		\
371 	 XFEATURE_MASK_Hi16_ZMM	 |		\
372 	 XFEATURE_MASK_PKRU |			\
373 	 XFEATURE_MASK_BNDREGS |		\
374 	 XFEATURE_MASK_BNDCSR |			\
375 	 XFEATURE_MASK_PASID)
376 
377 /*
378  * setup the xstate image representing the init state
379  */
380 static void __init setup_init_fpu_buf(void)
381 {
382 	static int on_boot_cpu __initdata = 1;
383 
384 	BUILD_BUG_ON((XFEATURE_MASK_USER_SUPPORTED |
385 		      XFEATURE_MASK_SUPERVISOR_SUPPORTED) !=
386 		     XFEATURES_INIT_FPSTATE_HANDLED);
387 
388 	WARN_ON_FPU(!on_boot_cpu);
389 	on_boot_cpu = 0;
390 
391 	if (!boot_cpu_has(X86_FEATURE_XSAVE))
392 		return;
393 
394 	setup_xstate_features();
395 	print_xstate_features();
396 
397 	if (boot_cpu_has(X86_FEATURE_XSAVES))
398 		init_fpstate.xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT |
399 						     xfeatures_mask_all;
400 
401 	/*
402 	 * Init all the features state with header.xfeatures being 0x0
403 	 */
404 	os_xrstor_booting(&init_fpstate.xsave);
405 
406 	/*
407 	 * All components are now in init state. Read the state back so
408 	 * that init_fpstate contains all non-zero init state. This only
409 	 * works with XSAVE, but not with XSAVEOPT and XSAVES because
410 	 * those use the init optimization which skips writing data for
411 	 * components in init state.
412 	 *
413 	 * XSAVE could be used, but that would require reshuffling the
414 	 * data when XSAVES is available because XSAVES uses xstate
415 	 * compaction. But doing so is a pointless exercise because most
416 	 * components have an all zeros init state except for the legacy
417 	 * ones (FP and SSE). Those can be saved with FXSAVE into the
418 	 * legacy area. Adding a new feature requires ensuring that its init
419 	 * state is all zeroes, or, if it is not, adding the necessary
420 	 * handling here.
421 	 */
422 	fxsave(&init_fpstate.fxsave);
423 }
424 
425 static int xfeature_uncompacted_offset(int xfeature_nr)
426 {
427 	u32 eax, ebx, ecx, edx;
428 
429 	/*
430 	 * Only XSAVES supports supervisor states and it uses compacted
431 	 * format. Checking a supervisor state's uncompacted offset is
432 	 * an error.
433 	 */
434 	if (XFEATURE_MASK_SUPERVISOR_ALL & BIT_ULL(xfeature_nr)) {
435 		WARN_ONCE(1, "No fixed offset for xstate %d\n", xfeature_nr);
436 		return -1;
437 	}
438 
439 	CHECK_XFEATURE(xfeature_nr);
440 	cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
441 	return ebx;
442 }
443 
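/*
 * Return the size in bytes of xstate component @xfeature_nr, as
 * enumerated by CPUID leaf 0xD, sub-leaf @xfeature_nr (EAX).
 */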
444 int xfeature_size(int xfeature_nr)
445 {
446 	u32 eax, ebx, ecx, edx;
447 
448 	CHECK_XFEATURE(xfeature_nr);
449 	cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
450 	return eax;
451 }
452 
453 /* Validate an xstate header supplied by userspace (ptrace or sigreturn) */
454 static int validate_user_xstate_header(const struct xstate_header *hdr)
455 {
456 	/* No unknown or supervisor features may be set */
457 	if (hdr->xfeatures & ~xfeatures_mask_uabi())
458 		return -EINVAL;
459 
460 	/* Userspace must use the uncompacted format */
461 	if (hdr->xcomp_bv)
462 		return -EINVAL;
463 
464 	/*
465 	 * If 'reserved' is shrunken to add a new field, make sure to validate
466 	 * that new field here!
467 	 */
468 	BUILD_BUG_ON(sizeof(hdr->reserved) != 48);
469 
470 	/* No reserved bits may be set */
471 	if (memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved)))
472 		return -EINVAL;
473 
474 	return 0;
475 }
476 
477 static void __xstate_dump_leaves(void)
478 {
479 	int i;
480 	u32 eax, ebx, ecx, edx;
481 	static int should_dump = 1;
482 
483 	if (!should_dump)
484 		return;
485 	should_dump = 0;
486 	/*
487 	 * Dump out a few leaves past the ones that we support
488 	 * just in case there are some goodies up there
489 	 */
490 	for (i = 0; i < XFEATURE_MAX + 10; i++) {
491 		cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
492 		pr_warn("CPUID[%02x, %02x]: eax=%08x ebx=%08x ecx=%08x edx=%08x\n",
493 			XSTATE_CPUID, i, eax, ebx, ecx, edx);
494 	}
495 }
496 
497 #define XSTATE_WARN_ON(x) do {							\
498 	if (WARN_ONCE(x, "XSAVE consistency problem, dumping leaves")) {	\
499 		__xstate_dump_leaves();						\
500 	}									\
501 } while (0)
502 
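/*
 * Compare the CPUID-enumerated size of state component @nr against the
 * corresponding kernel structure and dump the xstate leaves on mismatch.
 */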
503 #define XCHECK_SZ(sz, nr, nr_macro, __struct) do {			\
504 	if ((nr == nr_macro) &&						\
505 	    WARN_ONCE(sz != sizeof(__struct),				\
506 		"%s: struct is %zu bytes, cpu state %d bytes\n",	\
507 		__stringify(nr_macro), sizeof(__struct), sz)) {		\
508 		__xstate_dump_leaves();					\
509 	}								\
510 } while (0)
511 
512 /*
513  * We have a C struct for each 'xstate'.  We need to ensure
514  * that our software representation matches what the CPU
515  * tells us about the state's size.
516  */
517 static void check_xstate_against_struct(int nr)
518 {
519 	/*
520 	 * Ask the CPU for the size of the state.
521 	 */
522 	int sz = xfeature_size(nr);
523 	/*
524 	 * Match each CPU state with the corresponding software
525 	 * structure.
526 	 */
527 	XCHECK_SZ(sz, nr, XFEATURE_YMM,       struct ymmh_struct);
528 	XCHECK_SZ(sz, nr, XFEATURE_BNDREGS,   struct mpx_bndreg_state);
529 	XCHECK_SZ(sz, nr, XFEATURE_BNDCSR,    struct mpx_bndcsr_state);
530 	XCHECK_SZ(sz, nr, XFEATURE_OPMASK,    struct avx_512_opmask_state);
531 	XCHECK_SZ(sz, nr, XFEATURE_ZMM_Hi256, struct avx_512_zmm_uppers_state);
532 	XCHECK_SZ(sz, nr, XFEATURE_Hi16_ZMM,  struct avx_512_hi16_state);
533 	XCHECK_SZ(sz, nr, XFEATURE_PKRU,      struct pkru_state);
534 	XCHECK_SZ(sz, nr, XFEATURE_PASID,     struct ia32_pasid_state);
535 
536 	/*
537 	 * Make *SURE* to add any feature numbers in below if
538 	 * there are "holes" in the xsave state component
539 	 * numbers.
540 	 */
541 	if ((nr < XFEATURE_YMM) ||
542 	    (nr >= XFEATURE_MAX) ||
543 	    (nr == XFEATURE_PT_UNIMPLEMENTED_SO_FAR) ||
544 	    ((nr >= XFEATURE_RSRVD_COMP_11) && (nr <= XFEATURE_LBR))) {
545 		WARN_ONCE(1, "no structure for xstate: %d\n", nr);
546 		XSTATE_WARN_ON(1);
547 	}
548 }
549 
550 /*
551  * This essentially double-checks what the cpu told us about
552  * how large the XSAVE buffer needs to be.  We are recalculating
553  * it to be safe.
554  *
555  * Independent XSAVE features allocate their own buffers and are not
556  * covered by these checks. Only the size of the buffer for task->fpu
557  * is checked here.
558  */
559 static void do_extra_xstate_size_checks(void)
560 {
561 	int paranoid_xstate_size = FXSAVE_SIZE + XSAVE_HDR_SIZE;
562 	int i;
563 
564 	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
565 		if (!xfeature_enabled(i))
566 			continue;
567 
568 		check_xstate_against_struct(i);
569 		/*
570 		 * Supervisor state components can be managed only by
571 		 * XSAVES.
572 		 */
573 		if (!cpu_feature_enabled(X86_FEATURE_XSAVES))
574 			XSTATE_WARN_ON(xfeature_is_supervisor(i));
575 
576 		/* Align from the end of the previous feature */
577 		if (xfeature_is_aligned(i))
578 			paranoid_xstate_size = ALIGN(paranoid_xstate_size, 64);
579 		/*
580 		 * The offset of a given state in the non-compacted
581 		 * format is given to us in a CPUID leaf.  We check
582 		 * them for being ordered (increasing offsets) in
583 		 * setup_xstate_features(). XSAVES uses compacted format.
584 		 */
585 		if (!cpu_feature_enabled(X86_FEATURE_XSAVES))
586 			paranoid_xstate_size = xfeature_uncompacted_offset(i);
587 		/*
588 		 * The compacted-format offset always depends on where
589 		 * the previous state ended.
590 		 */
591 		paranoid_xstate_size += xfeature_size(i);
592 	}
593 	XSTATE_WARN_ON(paranoid_xstate_size != fpu_kernel_xstate_size);
594 }
595 
596 
597 /*
598  * Get total size of enabled xstates in XCR0 | IA32_XSS.
599  *
600  * Note the SDM's wording here.  "sub-function 0" only enumerates
601  * the size of the *user* states.  If we use it to size a buffer
602  * that we use 'XSAVES' on, we could potentially overflow the
603  * buffer because 'XSAVES' saves system states too.
604  */
605 static unsigned int __init get_xsaves_size(void)
606 {
607 	unsigned int eax, ebx, ecx, edx;
608 	/*
609 	 * - CPUID function 0DH, sub-function 1:
610 	 *    EBX enumerates the size (in bytes) required by
611 	 *    the XSAVES instruction for an XSAVE area
612 	 *    containing all the state components
613 	 *    corresponding to bits currently set in
614 	 *    XCR0 | IA32_XSS.
615 	 */
616 	cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
617 	return ebx;
618 }
619 
620 /*
621  * Get the total size of the enabled xstates without the independent supervisor
622  * features.
623  */
624 static unsigned int __init get_xsaves_size_no_independent(void)
625 {
626 	u64 mask = xfeatures_mask_independent();
627 	unsigned int size;
628 
629 	if (!mask)
630 		return get_xsaves_size();
631 
632 	/* Disable independent features. */
633 	wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor());
634 
635 	/*
636 	 * Ask the hardware what size is required of the buffer.
637 	 * This is the size required for the task->fpu buffer.
638 	 */
639 	size = get_xsaves_size();
640 
641 	/* Re-enable independent features so XSAVES will work on them again. */
642 	wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor() | mask);
643 
644 	return size;
645 }
646 
647 static unsigned int __init get_xsave_size(void)
648 {
649 	unsigned int eax, ebx, ecx, edx;
650 	/*
651 	 * - CPUID function 0DH, sub-function 0:
652 	 *    EBX enumerates the size (in bytes) required by
653 	 *    the XSAVE instruction for an XSAVE area
654 	 *    containing all the *user* state components
655 	 *    corresponding to bits currently set in XCR0.
656 	 */
657 	cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
658 	return ebx;
659 }
660 
661 /*
662  * Will the runtime-enumerated 'xstate_size' fit in the init
663  * task's statically-allocated buffer?
664  */
665 static bool is_supported_xstate_size(unsigned int test_xstate_size)
666 {
667 	if (test_xstate_size <= sizeof(union fpregs_state))
668 		return true;
669 
670 	pr_warn("x86/fpu: xstate buffer too small (%zu < %d), disabling xsave\n",
671 			sizeof(union fpregs_state), test_xstate_size);
672 	return false;
673 }
674 
675 static int __init init_xstate_size(void)
676 {
677 	/* Recompute the context size for enabled features: */
678 	unsigned int possible_xstate_size;
679 	unsigned int xsave_size;
680 
681 	xsave_size = get_xsave_size();
682 
683 	if (boot_cpu_has(X86_FEATURE_XSAVES))
684 		possible_xstate_size = get_xsaves_size_no_independent();
685 	else
686 		possible_xstate_size = xsave_size;
687 
688 	/* Ensure we have the space to store all enabled: */
689 	if (!is_supported_xstate_size(possible_xstate_size))
690 		return -EINVAL;
691 
692 	/*
693 	 * The size is OK, we are definitely going to use xsave,
694 	 * make it known to the world that we need more space.
695 	 */
696 	fpu_kernel_xstate_size = possible_xstate_size;
697 	do_extra_xstate_size_checks();
698 
699 	/*
700 	 * User space is always in standard format.
701 	 */
702 	fpu_user_xstate_size = xsave_size;
703 	return 0;
704 }
705 
706 /*
707  * We enabled the XSAVE hardware, but something went wrong and
708  * we can not use it.  Disable it.
709  */
710 static void fpu__init_disable_system_xstate(void)
711 {
712 	xfeatures_mask_all = 0;
713 	cr4_clear_bits(X86_CR4_OSXSAVE);
714 	setup_clear_cpu_cap(X86_FEATURE_XSAVE);
715 }
716 
717 /*
718  * Enable and initialize the xsave feature.
719  * Called once per system bootup.
720  */
721 void __init fpu__init_system_xstate(void)
722 {
723 	unsigned int eax, ebx, ecx, edx;
724 	static int on_boot_cpu __initdata = 1;
725 	u64 xfeatures;
726 	int err;
727 	int i;
728 
729 	WARN_ON_FPU(!on_boot_cpu);
730 	on_boot_cpu = 0;
731 
732 	if (!boot_cpu_has(X86_FEATURE_FPU)) {
733 		pr_info("x86/fpu: No FPU detected\n");
734 		return;
735 	}
736 
737 	if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
738 		pr_info("x86/fpu: x87 FPU will use %s\n",
739 			boot_cpu_has(X86_FEATURE_FXSR) ? "FXSAVE" : "FSAVE");
740 		return;
741 	}
742 
743 	if (boot_cpu_data.cpuid_level < XSTATE_CPUID) {
744 		WARN_ON_FPU(1);
745 		return;
746 	}
747 
748 	/*
749 	 * Find user xstates supported by the processor.
750 	 */
751 	cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
752 	xfeatures_mask_all = eax + ((u64)edx << 32);
753 
754 	/*
755 	 * Find supervisor xstates supported by the processor.
756 	 */
757 	cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
758 	xfeatures_mask_all |= ecx + ((u64)edx << 32);
759 
760 	if ((xfeatures_mask_uabi() & XFEATURE_MASK_FPSSE) != XFEATURE_MASK_FPSSE) {
761 		/*
762 		 * This indicates that something really unexpected happened
763 		 * with the enumeration.  Disable XSAVE and try to continue
764 		 * booting without it.  This is too early to BUG().
765 		 */
766 		pr_err("x86/fpu: FP/SSE not present amongst the CPU's xstate features: 0x%llx.\n",
767 		       xfeatures_mask_all);
768 		goto out_disable;
769 	}
770 
771 	/*
772 	 * Clear XSAVE features that are disabled in the normal CPUID.
773 	 */
774 	for (i = 0; i < ARRAY_SIZE(xsave_cpuid_features); i++) {
775 		if (!boot_cpu_has(xsave_cpuid_features[i]))
776 			xfeatures_mask_all &= ~BIT_ULL(i);
777 	}
778 
779 	xfeatures_mask_all &= XFEATURE_MASK_USER_SUPPORTED |
780 			      XFEATURE_MASK_SUPERVISOR_SUPPORTED;
781 
782 	/* Store it for paranoia check at the end */
783 	xfeatures = xfeatures_mask_all;
784 
785 	/* Enable xstate instructions to be able to continue with initialization: */
786 	fpu__init_cpu_xstate();
787 	err = init_xstate_size();
788 	if (err)
789 		goto out_disable;
790 
791 	/*
792 	 * Update info used for ptrace frames; use standard-format size and no
793 	 * supervisor xstates:
794 	 */
795 	update_regset_xstate_info(fpu_user_xstate_size, xfeatures_mask_uabi());
796 
797 	fpu__init_prepare_fx_sw_frame();
798 	setup_init_fpu_buf();
799 	setup_xstate_comp_offsets();
800 	setup_supervisor_only_offsets();
801 
802 	/*
803 	 * Paranoia check whether something in the setup modified the
804 	 * xfeatures mask.
805 	 */
806 	if (xfeatures != xfeatures_mask_all) {
807 		pr_err("x86/fpu: xfeatures modified from 0x%016llx to 0x%016llx during init, disabling XSAVE\n",
808 		       xfeatures, xfeatures_mask_all);
809 		goto out_disable;
810 	}
811 
812 	/*
813 	 * CPU capabilities initialization runs before FPU init. So
814 	 * X86_FEATURE_OSXSAVE is not set. Now that XSAVE is completely
815 	 * functional, set the feature bit so depending code works.
816 	 */
817 	setup_force_cpu_cap(X86_FEATURE_OSXSAVE);
818 
819 	print_xstate_offset_size();
820 	pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n",
821 		xfeatures_mask_all,
822 		fpu_kernel_xstate_size,
823 		boot_cpu_has(X86_FEATURE_XSAVES) ? "compacted" : "standard");
824 	return;
825 
826 out_disable:
827 	/* something went wrong, try to boot without any XSAVE support */
828 	fpu__init_disable_system_xstate();
829 }
830 
831 /*
832  * Restore minimal FPU state after suspend:
833  */
834 void fpu__resume_cpu(void)
835 {
836 	/*
837 	 * Restore XCR0 on xsave capable CPUs:
838 	 */
839 	if (cpu_feature_enabled(X86_FEATURE_XSAVE))
840 		xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask_uabi());
841 
842 	/*
843 	 * Restore IA32_XSS. The same CPUID bit enumerates support
844 	 * of XSAVES and MSR_IA32_XSS.
845 	 */
846 	if (cpu_feature_enabled(X86_FEATURE_XSAVES)) {
847 		wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor()  |
848 				     xfeatures_mask_independent());
849 	}
850 }
851 
852 /*
853  * Given an xstate feature nr, calculate where in the xsave
854  * buffer the state is.  Callers should ensure that the buffer
855  * is valid.
856  */
857 static void *__raw_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
858 {
859 	if (!xfeature_enabled(xfeature_nr)) {
860 		WARN_ON_FPU(1);
861 		return NULL;
862 	}
863 
864 	return (void *)xsave + xstate_comp_offsets[xfeature_nr];
865 }
866 /*
867  * Given the xsave area and a state inside, this function returns the
868  * address of the state.
869  *
870  * This is the API that is called to get xstate address in either
871  * standard format or compacted format of xsave area.
872  *
873  * Note that if there is no data for the field in the xsave buffer
874  * this will return NULL.
875  *
876  * Inputs:
877  *	xstate: the thread's storage area for all FPU data
878  *	xfeature_nr: state which is defined in xsave.h (e.g. XFEATURE_FP,
879  *	XFEATURE_SSE, etc...)
880  * Output:
881  *	address of the state in the xsave area, or NULL if the
882  *	field is not present in the xsave buffer.
883  */
884 void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
885 {
886 	/*
887 	 * Do we even *have* xsave state?
888 	 */
889 	if (!boot_cpu_has(X86_FEATURE_XSAVE))
890 		return NULL;
891 
892 	/*
893 	 * We should not ever be requesting features that we
894 	 * have not enabled.
895 	 */
896 	WARN_ONCE(!(xfeatures_mask_all & BIT_ULL(xfeature_nr)),
897 		  "get of unsupported state");
898 	/*
899 	 * This assumes the last 'xsave*' instruction to
900 	 * have requested that 'xfeature_nr' be saved.
901 	 * If it did not, we might be seeing an old value
902 	 * of the field in the buffer.
903 	 *
904 	 * This can happen because the last 'xsave' did not
905 	 * request that this feature be saved (unlikely)
906 	 * or because the "init optimization" caused it
907 	 * to not be saved.
908 	 */
909 	if (!(xsave->header.xfeatures & BIT_ULL(xfeature_nr)))
910 		return NULL;
911 
912 	return __raw_xsave_addr(xsave, xfeature_nr);
913 }
914 EXPORT_SYMBOL_GPL(get_xsave_addr);
915 
916 #ifdef CONFIG_ARCH_HAS_PKEYS
917 
918 /*
919  * This will go out and modify PKRU register to set the access
920  * rights for @pkey to @init_val.
921  */
922 int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
923 			      unsigned long init_val)
924 {
925 	u32 old_pkru, new_pkru_bits = 0;
926 	int pkey_shift;
927 
928 	/*
929 	 * This check implies XSAVE support.  OSPKE only gets
930 	 * set if we enable XSAVE and we enable PKU in XCR0.
931 	 */
932 	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
933 		return -EINVAL;
934 
935 	/*
936 	 * This code should only be called with valid 'pkey'
937 	 * values originating from in-kernel users.  Complain
938 	 * if a bad value is observed.
939 	 */
940 	if (WARN_ON_ONCE(pkey >= arch_max_pkey()))
941 		return -EINVAL;
942 
943 	/* Set the bits we need in PKRU:  */
944 	if (init_val & PKEY_DISABLE_ACCESS)
945 		new_pkru_bits |= PKRU_AD_BIT;
946 	if (init_val & PKEY_DISABLE_WRITE)
947 		new_pkru_bits |= PKRU_WD_BIT;
948 
949 	/* Shift the bits in to the correct place in PKRU for pkey: */
950 	pkey_shift = pkey * PKRU_BITS_PER_PKEY;
951 	new_pkru_bits <<= pkey_shift;
952 
953 	/* Get old PKRU and mask off any old bits in place: */
954 	old_pkru = read_pkru();
955 	old_pkru &= ~((PKRU_AD_BIT|PKRU_WD_BIT) << pkey_shift);
956 
957 	/* Write old part along with new part: */
958 	write_pkru(old_pkru | new_pkru_bits);
959 
960 	return 0;
961 }
962 #endif /* ! CONFIG_ARCH_HAS_PKEYS */
963 
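/*
 * Write one component to the membuf: from the task's xstate buffer when
 * @from_xstate is true, otherwise from the init state copy.
 */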
964 static void copy_feature(bool from_xstate, struct membuf *to, void *xstate,
965 			 void *init_xstate, unsigned int size)
966 {
967 	membuf_write(to, from_xstate ? xstate : init_xstate, size);
968 }
969 
970 /**
971  * copy_xstate_to_uabi_buf - Copy kernel saved xstate to a UABI buffer
972  * @to:		membuf descriptor
973  * @tsk:	The task from which to copy the saved xstate
974  * @copy_mode:	The requested copy mode
975  *
976  * Converts from kernel XSAVE or XSAVES compacted format to UABI conforming
977  * format, i.e. from the kernel internal hardware dependent storage format
978  * to the requested @copy_mode. UABI XSTATE is always uncompacted!
979  *
980  * It supports partial copy but @to.pos always starts from zero.
981  */
982 void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
983 			     enum xstate_copy_mode copy_mode)
984 {
985 	const unsigned int off_mxcsr = offsetof(struct fxregs_state, mxcsr);
986 	struct xregs_state *xsave = &tsk->thread.fpu.state.xsave;
987 	struct xregs_state *xinit = &init_fpstate.xsave;
988 	struct xstate_header header;
989 	unsigned int zerofrom;
990 	int i;
991 
992 	memset(&header, 0, sizeof(header));
993 	header.xfeatures = xsave->header.xfeatures;
994 
995 	/* Mask out the feature bits depending on copy mode */
996 	switch (copy_mode) {
997 	case XSTATE_COPY_FP:
998 		header.xfeatures &= XFEATURE_MASK_FP;
999 		break;
1000 
1001 	case XSTATE_COPY_FX:
1002 		header.xfeatures &= XFEATURE_MASK_FP | XFEATURE_MASK_SSE;
1003 		break;
1004 
1005 	case XSTATE_COPY_XSAVE:
1006 		header.xfeatures &= xfeatures_mask_uabi();
1007 		break;
1008 	}
1009 
1010 	/* Copy FP state up to MXCSR */
1011 	copy_feature(header.xfeatures & XFEATURE_MASK_FP, &to, &xsave->i387,
1012 		     &xinit->i387, off_mxcsr);
1013 
1014 	/* Copy MXCSR when SSE or YMM are set in the feature mask */
1015 	copy_feature(header.xfeatures & (XFEATURE_MASK_SSE | XFEATURE_MASK_YMM),
1016 		     &to, &xsave->i387.mxcsr, &xinit->i387.mxcsr,
1017 		     MXCSR_AND_FLAGS_SIZE);
1018 
1019 	/* Copy the remaining FP state */
1020 	copy_feature(header.xfeatures & XFEATURE_MASK_FP,
1021 		     &to, &xsave->i387.st_space, &xinit->i387.st_space,
1022 		     sizeof(xsave->i387.st_space));
1023 
1024 	/* Copy the SSE state - shared with YMM, but independently managed */
1025 	copy_feature(header.xfeatures & XFEATURE_MASK_SSE,
1026 		     &to, &xsave->i387.xmm_space, &xinit->i387.xmm_space,
1027 		     sizeof(xsave->i387.xmm_space));
1028 
1029 	if (copy_mode != XSTATE_COPY_XSAVE)
1030 		goto out;
1031 
1032 	/* Zero the padding area */
1033 	membuf_zero(&to, sizeof(xsave->i387.padding));
1034 
1035 	/* Copy xsave->i387.sw_reserved */
1036 	membuf_write(&to, xstate_fx_sw_bytes, sizeof(xsave->i387.sw_reserved));
1037 
1038 	/* Copy the user space relevant state of @xsave->header */
1039 	membuf_write(&to, &header, sizeof(header));
1040 
1041 	zerofrom = offsetof(struct xregs_state, extended_state_area);
1042 
1043 	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
1044 		/*
1045 		 * The ptrace buffer is in non-compacted XSAVE format.
1046 		 * In non-compacted format disabled features still occupy
1047 		 * state space, but there is no state to copy from in the
1048 		 * compacted init_fpstate. The gap tracking will zero this
1049 		 * later.
1050 		 */
1051 		if (!(xfeatures_mask_uabi() & BIT_ULL(i)))
1052 			continue;
1053 
1054 		/*
1055 		 * If there was a feature or alignment gap, zero the space
1056 		 * in the destination buffer.
1057 		 */
1058 		if (zerofrom < xstate_offsets[i])
1059 			membuf_zero(&to, xstate_offsets[i] - zerofrom);
1060 
1061 		if (i == XFEATURE_PKRU) {
1062 			struct pkru_state pkru = {0};
1063 			/*
1064 			 * PKRU is not necessarily up to date in the
1065 			 * thread's XSAVE buffer.  Fill this part from the
1066 			 * per-thread storage.
1067 			 */
1068 			pkru.pkru = tsk->thread.pkru;
1069 			membuf_write(&to, &pkru, sizeof(pkru));
1070 		} else {
1071 			copy_feature(header.xfeatures & BIT_ULL(i), &to,
1072 				     __raw_xsave_addr(xsave, i),
1073 				     __raw_xsave_addr(xinit, i),
1074 				     xstate_sizes[i]);
1075 		}
1076 		/*
1077 		 * Keep track of the last copied state in the non-compacted
1078 		 * target buffer for gap zeroing.
1079 		 */
1080 		zerofrom = xstate_offsets[i] + xstate_sizes[i];
1081 	}
1082 
1083 out:
1084 	if (to.left)
1085 		membuf_zero(&to, to.left);
1086 }
1087 
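/*
 * Copy @size bytes at @offset from the kernel buffer @kbuf when it is
 * non-NULL, otherwise from the user space buffer @ubuf.
 */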
1088 static int copy_from_buffer(void *dst, unsigned int offset, unsigned int size,
1089 			    const void *kbuf, const void __user *ubuf)
1090 {
1091 	if (kbuf) {
1092 		memcpy(dst, kbuf + offset, size);
1093 	} else {
1094 		if (copy_from_user(dst, ubuf + offset, size))
1095 			return -EFAULT;
1096 	}
1097 	return 0;
1098 }
1099 
1100 
1101 /**
1102  * copy_uabi_to_xstate - Copy a UABI format buffer to the kernel xstate
1103  * @xsave:	The xsave buffer to copy to
1104  * @kbuf:	The UABI format buffer, if it comes from the kernel
1105  * @ubuf:	The UABI format buffer, if it comes from userspace
1106  * @pkru:	The location to write the PKRU value to
1107  *
1108  * Converts from the UABI format into the kernel internal hardware
1109  * dependent format.
1110  *
1111  * This function ultimately has two different callers with distinct PKRU
1112  * behavior.
1113  * 1.	When called from sigreturn the PKRU register will be restored from
1114  *	@xsave via an XRSTOR. Correctly copying the UABI format buffer to
1115  *	@xsave is sufficient to cover this case, but the caller will also
1116  *	pass a pointer to the thread_struct's pkru field in @pkru and updating
1117  *	it is harmless.
1118  * 2.	When called from ptrace the PKRU register will be restored from the
1119  *	thread_struct's pkru field. A pointer to that is passed in @pkru.
1120  *	The kernel will restore it manually, so the XRSTOR behavior that resets
1121  *	the PKRU register to the hardware init value (0) if the corresponding
1122  *	xfeatures bit is not set is emulated here.
1123  */
1124 static int copy_uabi_to_xstate(struct xregs_state *xsave, const void *kbuf,
1125 			       const void __user *ubuf, u32 *pkru)
1126 {
1127 	unsigned int offset, size;
1128 	struct xstate_header hdr;
1129 	u64 mask;
1130 	int i;
1131 
1132 	offset = offsetof(struct xregs_state, header);
1133 	if (copy_from_buffer(&hdr, offset, sizeof(hdr), kbuf, ubuf))
1134 		return -EFAULT;
1135 
1136 	if (validate_user_xstate_header(&hdr))
1137 		return -EINVAL;
1138 
1139 	/* Validate MXCSR when any of the related features is in use */
1140 	mask = XFEATURE_MASK_FP | XFEATURE_MASK_SSE | XFEATURE_MASK_YMM;
1141 	if (hdr.xfeatures & mask) {
1142 		u32 mxcsr[2];
1143 
1144 		offset = offsetof(struct fxregs_state, mxcsr);
1145 		if (copy_from_buffer(mxcsr, offset, sizeof(mxcsr), kbuf, ubuf))
1146 			return -EFAULT;
1147 
1148 		/* Reserved bits in MXCSR must be zero. */
1149 		if (mxcsr[0] & ~mxcsr_feature_mask)
1150 			return -EINVAL;
1151 
1152 		/* SSE and YMM require MXCSR even when FP is not in use. */
1153 		if (!(hdr.xfeatures & XFEATURE_MASK_FP)) {
1154 			xsave->i387.mxcsr = mxcsr[0];
1155 			xsave->i387.mxcsr_mask = mxcsr[1];
1156 		}
1157 	}
1158 
1159 	for (i = 0; i < XFEATURE_MAX; i++) {
1160 		u64 mask = ((u64)1 << i);
1161 
1162 		if (hdr.xfeatures & mask) {
1163 			void *dst = __raw_xsave_addr(xsave, i);
1164 
1165 			offset = xstate_offsets[i];
1166 			size = xstate_sizes[i];
1167 
1168 			if (copy_from_buffer(dst, offset, size, kbuf, ubuf))
1169 				return -EFAULT;
1170 		}
1171 	}
1172 
1173 	if (hdr.xfeatures & XFEATURE_MASK_PKRU) {
1174 		struct pkru_state *xpkru;
1175 
1176 		xpkru = __raw_xsave_addr(xsave, XFEATURE_PKRU);
1177 		*pkru = xpkru->pkru;
1178 	} else
1179 		*pkru = 0;
1180 
1181 	/*
1182 	 * The state that came in from userspace was user-state only.
1183 	 * Mask all the user states out of 'xfeatures':
1184 	 */
1185 	xsave->header.xfeatures &= XFEATURE_MASK_SUPERVISOR_ALL;
1186 
1187 	/*
1188 	 * Add back in the features that came in from userspace:
1189 	 */
1190 	xsave->header.xfeatures |= hdr.xfeatures;
1191 
1192 	return 0;
1193 }
1194 
1195 /*
1196  * Convert from a ptrace standard-format kernel buffer to kernel XSAVE[S]
1197  * format and copy to the target thread. This is called from
1198  * xstateregs_set().
1199  */
1200 int copy_uabi_from_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf, u32 *pkru)
1201 {
1202 	return copy_uabi_to_xstate(xsave, kbuf, NULL, pkru);
1203 }
1204 
1205 /*
1206  * Convert from a sigreturn standard-format user-space buffer to kernel
1207  * XSAVE[S] format and copy to the target thread. This is called from the
1208  * sigreturn() and rt_sigreturn() system calls.
1209  */
1210 int copy_sigframe_from_user_to_xstate(struct task_struct *tsk,
1211 				      const void __user *ubuf)
1212 {
1213 	return copy_uabi_to_xstate(&tsk->thread.fpu.state.xsave, NULL, ubuf, &tsk->thread.pkru);
1214 }
1215 
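/*
 * Sanity check a feature mask for xsaves()/xrstors(): XSAVES must be
 * available and @mask must be a non-empty subset of either the
 * independent features or the xfeatures known to the kernel.
 */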
1216 static bool validate_xsaves_xrstors(u64 mask)
1217 {
1218 	u64 xchk;
1219 
1220 	if (WARN_ON_FPU(!cpu_feature_enabled(X86_FEATURE_XSAVES)))
1221 		return false;
1222 	/*
1223 	 * Validate that this is either a task->fpstate related component
1224 	 * subset or an independent one.
1225 	 */
1226 	if (mask & xfeatures_mask_independent())
1227 		xchk = ~xfeatures_mask_independent();
1228 	else
1229 		xchk = ~xfeatures_mask_all;
1230 
1231 	if (WARN_ON_ONCE(!mask || mask & xchk))
1232 		return false;
1233 
1234 	return true;
1235 }
1236 
1237 /**
1238  * xsaves - Save selected components to a kernel xstate buffer
1239  * @xstate:	Pointer to the buffer
1240  * @mask:	Feature mask to select the components to save
1241  *
1242  * The @xstate buffer must be 64 byte aligned and correctly initialized as
1243  * XSAVES does not write the full xstate header. Before first use the
1244  * buffer should be zeroed otherwise a consecutive XRSTORS from that buffer
1245  * can #GP.
1246  *
1247  * The feature mask must either be a subset of the independent features or
1248  * a subset of the task->fpstate related features.
1249  */
1250 void xsaves(struct xregs_state *xstate, u64 mask)
1251 {
1252 	int err;
1253 
1254 	if (!validate_xsaves_xrstors(mask))
1255 		return;
1256 
1257 	XSTATE_OP(XSAVES, xstate, (u32)mask, (u32)(mask >> 32), err);
1258 	WARN_ON_ONCE(err);
1259 }
1260 
1261 /**
1262  * xrstors - Restore selected components from a kernel xstate buffer
1263  * @xstate:	Pointer to the buffer
1264  * @mask:	Feature mask to select the components to restore
1265  *
1266  * The @xstate buffer must be 64 byte aligned and correctly initialized
1267  * otherwise XRSTORS from that buffer can #GP.
1268  *
1269  * Proper usage is to restore the state which was saved with
1270  * xsaves() into @xstate.
1271  *
1272  * The feature mask must either be a subset of the independent features or
1273  * a subset of the task->fpstate related features.
1274  */
1275 void xrstors(struct xregs_state *xstate, u64 mask)
1276 {
1277 	int err;
1278 
1279 	if (!validate_xsaves_xrstors(mask))
1280 		return;
1281 
1282 	XSTATE_OP(XRSTORS, xstate, (u32)mask, (u32)(mask >> 32), err);
1283 	WARN_ON_ONCE(err);
1284 }
1285 
1286 #ifdef CONFIG_PROC_PID_ARCH_STATUS
1287 /*
1288  * Report the amount of time elapsed in milliseconds since last AVX512
1289  * use in the task.
1290  */
1291 static void avx512_status(struct seq_file *m, struct task_struct *task)
1292 {
1293 	unsigned long timestamp = READ_ONCE(task->thread.fpu.avx512_timestamp);
1294 	long delta;
1295 
1296 	if (!timestamp) {
1297 		/*
1298 		 * Report -1 if no AVX512 usage
1299 		 */
1300 		delta = -1;
1301 	} else {
1302 		delta = (long)(jiffies - timestamp);
1303 		/*
1304 		 * Cap to LONG_MAX if time difference > LONG_MAX
1305 		 */
1306 		if (delta < 0)
1307 			delta = LONG_MAX;
1308 		delta = jiffies_to_msecs(delta);
1309 	}
1310 
1311 	seq_put_decimal_ll(m, "AVX512_elapsed_ms:\t", delta);
1312 	seq_putc(m, '\n');
1313 }
1314 
1315 /*
1316  * Report architecture specific information
1317  */
1318 int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns,
1319 			struct pid *pid, struct task_struct *task)
1320 {
1321 	/*
1322 	 * Report AVX512 state if the processor and the build option support it.
1323 	 */
1324 	if (cpu_feature_enabled(X86_FEATURE_AVX512F))
1325 		avx512_status(m, task);
1326 
1327 	return 0;
1328 }
1329 #endif /* CONFIG_PROC_PID_ARCH_STATUS */
1330