#ifndef __ASM_X86_XSAVE_H
#define __ASM_X86_XSAVE_H

#include <linux/types.h>
#include <asm/processor.h>

#define XSTATE_CPUID 0x0000000d

#define XSTATE_FP 0x1
#define XSTATE_SSE 0x2
#define XSTATE_YMM 0x4
#define XSTATE_BNDREGS 0x8
#define XSTATE_BNDCSR 0x10
#define XSTATE_OPMASK 0x20
#define XSTATE_ZMM_Hi256 0x40
#define XSTATE_Hi16_ZMM 0x80

#define XSTATE_FPSSE (XSTATE_FP | XSTATE_SSE)
/* Bit 63 of XCR0 is reserved for future expansion */
#define XSTATE_EXTEND_MASK (~(XSTATE_FPSSE | (1ULL << 63)))
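/*
 * For reference: XSTATE_FPSSE is 0x3 and bit 63 is reserved, so
 * XSTATE_EXTEND_MASK evaluates to 0x7ffffffffffffffc, i.e. every
 * extended state component (YMM, MPX, AVX-512, ...) but neither the
 * legacy FP/SSE bits nor the reserved bit.
 */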

#define FXSAVE_SIZE 512

#define XSAVE_HDR_SIZE 64
#define XSAVE_HDR_OFFSET FXSAVE_SIZE

#define XSAVE_YMM_SIZE 256
#define XSAVE_YMM_OFFSET (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
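/*
 * Resulting layout: bytes 0-511 hold the legacy FXSAVE image, bytes
 * 512-575 the xsave header, so the YMM_Hi128 state begins at offset
 * 512 + 64 = 576 and spans 16 registers * 16 bytes = 256 bytes.
 */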

/* Supported features which allow lazy state saving */
#define XSTATE_LAZY (XSTATE_FP | XSTATE_SSE | XSTATE_YMM \
		| XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)

/* Supported features which require eager state saving */
#define XSTATE_EAGER (XSTATE_BNDREGS | XSTATE_BNDCSR)

/* All currently supported features */
#define XCNTXT_MASK (XSTATE_LAZY | XSTATE_EAGER)
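/*
 * For reference: XSTATE_LAZY is 0x1|0x2|0x4|0x20|0x40|0x80 = 0xe7 and
 * XSTATE_EAGER is 0x8|0x10 = 0x18, so XCNTXT_MASK covers bits 0-7,
 * i.e. 0xff. The MPX bits are eager presumably because bound-range
 * state is consumed by ordinary instructions and therefore can not
 * rely on the #NM-based lazy restore path.
 */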

#ifdef CONFIG_X86_64
#define REX_PREFIX "0x48, "
#else
#define REX_PREFIX
#endif

extern unsigned int xstate_size;
extern u64 pcntxt_mask;
extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
extern struct xsave_struct *init_xstate_buf;

extern void xsave_init(void);
extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);
extern int init_fpu(struct task_struct *child);

/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE ".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT ".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVES ".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR ".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS ".byte " REX_PREFIX "0x0f,0xc7,0x1f"
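/*
 * These are hand-assembled because old binutils may not know the
 * mnemonics: 0x0f,0xae is the XSAVE opcode group and 0x0f,0xc7 the
 * XSAVES/XRSTORS group; the trailing ModRM byte encodes the /reg
 * opcode extension plus (%edi)/(%rdi) as the memory operand, e.g.
 * 0x27 is mod 00, reg /4 (XSAVE), rm 111. On 64-bit the 0x48 REX.W
 * prefix selects the 64-bit (XSAVE64-style) forms.
 */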

#define xstate_fault ".section .fixup,\"ax\"\n" \
	"3: movl $-1,%[err]\n" \
	" jmp 2b\n" \
	".previous\n" \
	_ASM_EXTABLE(1b, 3b) \
	: [err] "=r" (err)
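/*
 * How the fixup works: the instruction at local label 1: (emitted by
 * the user of this macro) gets an exception table entry pointing at
 * 3:. On a fault the handler lands at 3:, stores -1 in the [err]
 * output and jumps back to 2:, right after the faulting instruction,
 * so the caller sees err == -1 instead of an unhandled fault.
 */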

/*
 * This function is called only during boot time when x86 caps are not
 * set up and alternatives can not be used yet.
 */
static inline int xsave_state_booting(struct xsave_struct *fx, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err = 0;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		asm volatile("1:"XSAVES"\n\t"
			"2:\n\t"
			xstate_fault
			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
			: "memory");
	else
		asm volatile("1:"XSAVE"\n\t"
			"2:\n\t"
			xstate_fault
			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
			: "memory");
	return err;
}
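/*
 * Usage sketch (illustrative only, not part of this header): boot code
 * can snapshot the init state before alternatives are patched, e.g.
 *
 *	if (xsave_state_booting(init_xstate_buf, -1))
 *		pr_warn("xsave of init state failed\n");
 *
 * Note that the u64 mask is split into edx:eax (hmask:lmask), which is
 * how the instruction takes its requested-feature bitmap.
 */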

/*
 * This function is called only during boot time when x86 caps are not
 * set up and alternatives can not be used yet.
 */
static inline int xrstor_state_booting(struct xsave_struct *fx, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err = 0;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		asm volatile("1:"XRSTORS"\n\t"
			"2:\n\t"
			xstate_fault
			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
			: "memory");
	else
		asm volatile("1:"XRSTOR"\n\t"
			"2:\n\t"
			xstate_fault
			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
			: "memory");
	return err;
}

/*
 * Save processor xstate to xsave area.
 */
static inline int xsave_state(struct xsave_struct *fx, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err = 0;

	/*
	 * If xsaves is enabled, xsaves replaces xsaveopt because
	 * it supports the compact format and supervisor states in
	 * addition to the modified optimization of xsaveopt.
	 *
	 * Otherwise, if xsaveopt is enabled, xsaveopt replaces xsave
	 * because xsaveopt supports the modified optimization, which
	 * xsave does not.
	 *
	 * If neither xsaves nor xsaveopt is enabled, use xsave.
	 */
	alternative_input_2(
		"1:"XSAVE,
		XSAVEOPT,
		X86_FEATURE_XSAVEOPT,
		XSAVES,
		X86_FEATURE_XSAVES,
		[fx] "D" (fx), "a" (lmask), "d" (hmask) :
		"memory");
	asm volatile("2:\n\t"
		xstate_fault
		: "0" (0)
		: "memory");

	return err;
}
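/*
 * The alternatives machinery patches the instruction in place at boot,
 * so at runtime this behaves roughly like the following, without any
 * branch (sketch, not real code):
 *
 *	if (cpu_has(X86_FEATURE_XSAVES))
 *		xsaves (%rdi)
 *	else if (cpu_has(X86_FEATURE_XSAVEOPT))
 *		xsaveopt (%rdi)
 *	else
 *		xsave (%rdi)
 */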

/*
 * Restore processor xstate from xsave area.
 */
static inline int xrstor_state(struct xsave_struct *fx, u64 mask)
{
	int err = 0;
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	/*
	 * Use xrstors to restore context if it is enabled. xrstors
	 * supports the compacted format of the xsave area, which
	 * xrstor does not.
	 */
	alternative_input(
		"1: " XRSTOR,
		XRSTORS,
		X86_FEATURE_XSAVES,
		"D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
		: "memory");

	asm volatile("2:\n"
		xstate_fault
		: "0" (0)
		: "memory");

	return err;
}

/*
 * Save xstate context for old process during context switch.
 */
static inline void fpu_xsave(struct fpu *fpu)
{
	xsave_state(&fpu->state->xsave, -1);
}

/*
 * Restore xstate context for new process during context switch.
 */
static inline int fpu_xrstor_checking(struct xsave_struct *fx)
{
	return xrstor_state(fx, -1);
}

/*
 * Save xstate to user space xsave area.
 *
 * We don't use the modified optimization because xrstor/xrstors might
 * track a different application.
 *
 * We don't use the compacted format of the xsave area, for backward
 * compatibility with old applications which don't understand it.
 */
static inline int xsave_user(struct xsave_struct __user *buf)
{
	int err;

	/*
	 * Clear the xsave header first, so that reserved fields are
	 * initialized to zero.
	 */
	err = __clear_user(&buf->xsave_hdr, sizeof(buf->xsave_hdr));
	if (unlikely(err))
		return -EFAULT;

	__asm__ __volatile__(ASM_STAC "\n"
		"1:"XSAVE"\n"
		"2: " ASM_CLAC "\n"
		xstate_fault
		: "D" (buf), "a" (-1), "d" (-1), "0" (0)
		: "memory");
	return err;
}
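/*
 * Note on ASM_STAC/ASM_CLAC: on SMAP-capable CPUs they open and close
 * the window in which the kernel may legitimately touch the user
 * buffer; on CPUs without SMAP the alternatives framework patches
 * them out.
 */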

/*
 * Restore xstate from user space xsave area.
 */
static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
{
	int err = 0;
	struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	__asm__ __volatile__(ASM_STAC "\n"
		"1:"XRSTOR"\n"
		"2: " ASM_CLAC "\n"
		xstate_fault
		: "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
		: "memory");	/* memory required? */
	return err;
}

void *get_xsave_addr(struct xsave_struct *xsave, int xstate);
void setup_xstate_comp(void);

#endif