/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MSR_H
#define _ASM_X86_MSR_H

#include "msr-index.h"

#ifndef __ASSEMBLY__

#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/cpumask.h>
#include <uapi/asm/msr.h>

struct msr {
	union {
		struct {
			u32 l;
			u32 h;
		};
		u64 q;
	};
};

struct msr_info {
	u32 msr_no;
	struct msr reg;
	struct msr *msrs;
	int err;
};

struct msr_regs_info {
	u32 *regs;
	int err;
};

struct saved_msr {
	bool valid;
	struct msr_info info;
};

struct saved_msrs {
	unsigned int num;
	struct saved_msr *array;
};

/*
 * Both i386 and x86_64 return the 64-bit value in edx:eax, but gcc's "A"
 * constraint has different meanings. For i386, "A" means exactly
 * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead,
 * it means rax *or* rdx.
 */
#ifdef CONFIG_X86_64
/* Using 64-bit values saves one instruction clearing the high half of low */
#define DECLARE_ARGS(val, low, high)	unsigned long low, high
#define EAX_EDX_VAL(val, low, high)	((low) | (high) << 32)
#define EAX_EDX_RET(val, low, high)	"=a" (low), "=d" (high)
#else
#define DECLARE_ARGS(val, low, high)	unsigned long long val
#define EAX_EDX_VAL(val, low, high)	(val)
#define EAX_EDX_RET(val, low, high)	"=A" (val)
#endif
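
/*
 * Illustrative expansion (a sketch, not part of the header proper): on
 * x86_64 the two outputs land in separate registers and EAX_EDX_VAL()
 * merges them, roughly:
 *
 *	unsigned long low, high;		// DECLARE_ARGS()
 *	u64 v = low | (high << 32);		// EAX_EDX_VAL()
 *
 * On i386 the "=A" constraint fills a single 64-bit variable from
 * edx:eax directly, so EAX_EDX_VAL() is simply (val).
 */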

/*
 * Be very careful with includes. This header is prone to include loops.
 */
#include <asm/atomic.h>
#include <linux/tracepoint-defs.h>

#ifdef CONFIG_TRACEPOINTS
DECLARE_TRACEPOINT(read_msr);
DECLARE_TRACEPOINT(write_msr);
DECLARE_TRACEPOINT(rdpmc);
extern void do_trace_write_msr(unsigned int msr, u64 val, int failed);
extern void do_trace_read_msr(unsigned int msr, u64 val, int failed);
extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed);
#else
static inline void do_trace_write_msr(unsigned int msr, u64 val, int failed) {}
static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {}
static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
#endif

/*
 * __rdmsr() and __wrmsr() are the two primitives which are the bare minimum MSR
 * accessors and should not have any tracing or other functionality piggybacking
 * on them - those are *purely* for accessing MSRs and nothing more. So don't even
 * think of extending them - you will be slapped with a stinking trout or a frozen
 * shark will reach you, wherever you are! You've been warned.
 */
static __always_inline unsigned long long __rdmsr(unsigned int msr)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("1: rdmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_RDMSR)
		     : EAX_EDX_RET(val, low, high) : "c" (msr));

	return EAX_EDX_VAL(val, low, high);
}

static __always_inline void __wrmsr(unsigned int msr, u32 low, u32 high)
{
	asm volatile("1: wrmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR)
		     : : "c" (msr), "a" (low), "d" (high) : "memory");
}

#define native_rdmsr(msr, val1, val2)			\
do {							\
	u64 __val = __rdmsr((msr));			\
	(void)((val1) = (u32)__val);			\
	(void)((val2) = (u32)(__val >> 32));		\
} while (0)

#define native_wrmsr(msr, low, high)			\
	__wrmsr(msr, low, high)

#define native_wrmsrl(msr, val)				\
	__wrmsr((msr), (u32)((u64)(val)),		\
		       (u32)((u64)(val) >> 32))
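
/*
 * Usage sketch (illustrative, not code from this header): reading and
 * writing a full MSR with the native_* wrappers. MSR_IA32_MISC_ENABLE
 * comes from msr-index.h.
 *
 *	u32 lo, hi;
 *
 *	native_rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
 *	native_wrmsrl(MSR_IA32_MISC_ENABLE, ((u64)hi << 32) | lo);
 *
 * These build on the bare __rdmsr()/__wrmsr() primitives and bypass
 * both tracepoints and paravirt, unlike rdmsr()/wrmsr() below.
 */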

static inline unsigned long long native_read_msr(unsigned int msr)
{
	unsigned long long val;

	val = __rdmsr(msr);

	if (tracepoint_enabled(read_msr))
		do_trace_read_msr(msr, val, 0);

	return val;
}

static inline unsigned long long native_read_msr_safe(unsigned int msr,
						      int *err)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("1: rdmsr ; xor %[err],%[err]\n"
		     "2:\n\t"
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_RDMSR_SAFE, %[err])
		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
		     : "c" (msr));
	if (tracepoint_enabled(read_msr))
		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
	return EAX_EDX_VAL(val, low, high);
}

/* Can be uninlined because referenced by paravirt */
static inline void notrace
native_write_msr(unsigned int msr, u32 low, u32 high)
{
	__wrmsr(msr, low, high);

	if (tracepoint_enabled(write_msr))
		do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
}

/* Can be uninlined because referenced by paravirt */
static inline int notrace
native_write_msr_safe(unsigned int msr, u32 low, u32 high)
{
	int err;

	asm volatile("1: wrmsr ; xor %[err],%[err]\n"
		     "2:\n\t"
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_WRMSR_SAFE, %[err])
		     : [err] "=a" (err)
		     : "c" (msr), "0" (low), "d" (high)
		     : "memory");
	if (tracepoint_enabled(write_msr))
		do_trace_write_msr(msr, ((u64)high << 32 | low), err);
	return err;
}

extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);

/**
 * rdtsc() - returns the current TSC without ordering constraints
 *
 * rdtsc() returns the result of RDTSC as a 64-bit integer. The
 * only ordering constraint it supplies is the ordering implied by
 * "asm volatile": it will put the RDTSC in the place you expect. The
 * CPU can and will speculatively execute that RDTSC, though, so the
 * results can be non-monotonic if compared on different CPUs.
 */
static __always_inline unsigned long long rdtsc(void)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));

	return EAX_EDX_VAL(val, low, high);
}
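
/*
 * Usage sketch (illustrative, not part of this header): a cheap cycle
 * delta on one CPU. Because plain RDTSC can execute speculatively, this
 * is only a rough measurement; use rdtsc_ordered() below when the read
 * must not move past earlier loads. The workload call is hypothetical.
 *
 *	u64 t0, t1;
 *
 *	t0 = rdtsc();
 *	do_something();			// hypothetical workload
 *	t1 = rdtsc();
 *	pr_info("~%llu cycles\n", t1 - t0);
 */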

/**
 * rdtsc_ordered() - read the current TSC in program order
 *
 * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
 * It is ordered like a load to a global in-memory counter. It should
 * be impossible to observe non-monotonic rdtsc_ordered() behavior
 * across multiple CPUs as long as the TSC is synced.
 */
static __always_inline unsigned long long rdtsc_ordered(void)
{
	DECLARE_ARGS(val, low, high);

	/*
	 * The RDTSC instruction is not ordered relative to memory
	 * access. The Intel SDM and the AMD APM are both vague on this
	 * point, but empirically an RDTSC instruction can be
	 * speculatively executed before prior loads. An RDTSC
	 * immediately after an appropriate barrier appears to be
	 * ordered as a normal load, that is, it provides the same
	 * ordering guarantees as reading from a global memory location
	 * that some other imaginary CPU is updating continuously with a
	 * time stamp.
	 *
	 * Thus, use the preferred barrier on the respective CPU, aiming for
	 * RDTSCP as the default.
	 */
	asm volatile(ALTERNATIVE_2("rdtsc",
				   "lfence; rdtsc", X86_FEATURE_LFENCE_RDTSC,
				   "rdtscp", X86_FEATURE_RDTSCP)
			: EAX_EDX_RET(val, low, high)
			/* RDTSCP clobbers ECX with MSR_TSC_AUX. */
			:: "ecx");

	return EAX_EDX_VAL(val, low, high);
}
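
/*
 * Usage sketch (illustrative, not from this header): timing a region so
 * the reads stay ordered with the surrounding loads. The workload
 * function is hypothetical.
 *
 *	u64 start, end;
 *
 *	start = rdtsc_ordered();
 *	touch_memory();			// hypothetical
 *	end = rdtsc_ordered();
 *
 * The barrier-then-RDTSC (or RDTSCP) sequence keeps each read from
 * drifting ahead of the loads before it, which plain rdtsc() allows.
 */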

static inline unsigned long long native_read_pmc(int counter)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
	if (tracepoint_enabled(rdpmc))
		do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
	return EAX_EDX_VAL(val, low, high);
}

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#include <linux/errno.h>
/*
 * Access to machine-specific registers (available on 586 and better only).
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection); this allows gcc to optimize better.
 */

#define rdmsr(msr, low, high)					\
do {								\
	u64 __val = native_read_msr((msr));			\
	(void)((low) = (u32)__val);				\
	(void)((high) = (u32)(__val >> 32));			\
} while (0)

static inline void wrmsr(unsigned int msr, u32 low, u32 high)
{
	native_write_msr(msr, low, high);
}

#define rdmsrl(msr, val)			\
	((val) = native_read_msr((msr)))

static inline void wrmsrl(unsigned int msr, u64 val)
{
	native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
}
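
/*
 * Usage sketch (illustrative, not from this header): the unchecked
 * accessors, which #GP if the MSR does not exist on this CPU.
 * MSR_TSC_AUX comes from msr-index.h; the value written is arbitrary.
 *
 *	u32 lo, hi;
 *	u64 val;
 *
 *	rdmsr(MSR_TSC_AUX, lo, hi);		// split 32-bit form
 *	rdmsrl(MSR_TSC_AUX, val);		// 64-bit form
 *	wrmsrl(MSR_TSC_AUX, val | 1);
 */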

/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned int msr, u32 low, u32 high)
{
	return native_write_msr_safe(msr, low, high);
}

/* rdmsr with exception handling */
#define rdmsr_safe(msr, low, high)				\
({								\
	int __err;						\
	u64 __val = native_read_msr_safe((msr), &__err);	\
	(*low) = (u32)__val;					\
	(*high) = (u32)(__val >> 32);				\
	__err;							\
})

static inline int rdmsrl_safe(unsigned int msr, unsigned long long *p)
{
	int err;

	*p = native_read_msr_safe(msr, &err);
	return err;
}
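
/*
 * Usage sketch (illustrative, not from this header): the _safe variants
 * return an error code instead of faulting, so probing for an MSR that
 * may not exist on the current CPU looks roughly like:
 *
 *	u64 val;
 *
 *	if (rdmsrl_safe(MSR_TSC_AUX, &val))	// non-zero: MSR faulted
 *		return -ENODEV;
 *	if (wrmsrl_safe(MSR_TSC_AUX, val))
 *		return -EIO;
 */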

#define rdpmc(counter, low, high)			\
do {							\
	u64 _l = native_read_pmc((counter));		\
	(low)  = (u32)_l;				\
	(high) = (u32)(_l >> 32);			\
} while (0)

#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))

#endif	/* !CONFIG_PARAVIRT_XXL */

/*
 * 64-bit version of wrmsr_safe():
 */
static inline int wrmsrl_safe(u32 msr, u64 val)
{
	return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
}

struct msr *msrs_alloc(void);
void msrs_free(struct msr *msrs);
int msr_set_bit(u32 msr, u8 bit);
int msr_clear_bit(u32 msr, u8 bit);

#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
#else  /*  CONFIG_SMP  */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
	return 0;
}
static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
	return 0;
}
static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	rdmsrl(msr_no, *q);
	return 0;
}
static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	wrmsrl(msr_no, q);
	return 0;
}
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr *msrs)
{
	rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
}
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr *msrs)
{
	wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
				    u32 *l, u32 *h)
{
	return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	return rdmsrl_safe(msr_no, q);
}
static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	return wrmsrl_safe(msr_no, q);
}
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return rdmsr_safe_regs(regs);
}
static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return wrmsr_safe_regs(regs);
}
#endif  /* CONFIG_SMP */
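
/*
 * Usage sketch (illustrative, not from this header): on SMP the *_on_cpu
 * helpers run the access on the named CPU via smp_call_function_single(),
 * so they must not be called from interrupt context; on UP they fall back
 * to the plain accessors above. The CPU number and MSR here are examples
 * only; MSR_IA32_MISC_ENABLE comes from msr-index.h.
 *
 *	u64 val;
 *	int err;
 *
 *	err = rdmsrl_safe_on_cpu(1, MSR_IA32_MISC_ENABLE, &val);
 *	if (!err)
 *		err = wrmsrl_on_cpu(1, MSR_IA32_MISC_ENABLE, val);
 */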
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_MSR_H */