1 // SPDX-License-Identifier: GPL-2.0-only
2 #define pr_fmt(fmt) "SMP alternatives: " fmt
3
4 #include <linux/module.h>
5 #include <linux/sched.h>
6 #include <linux/perf_event.h>
7 #include <linux/mutex.h>
8 #include <linux/list.h>
9 #include <linux/stringify.h>
10 #include <linux/highmem.h>
11 #include <linux/mm.h>
12 #include <linux/vmalloc.h>
13 #include <linux/memory.h>
14 #include <linux/stop_machine.h>
15 #include <linux/slab.h>
16 #include <linux/kdebug.h>
17 #include <linux/kprobes.h>
18 #include <linux/mmu_context.h>
19 #include <linux/bsearch.h>
20 #include <linux/sync_core.h>
21 #include <asm/text-patching.h>
22 #include <asm/alternative.h>
23 #include <asm/sections.h>
24 #include <asm/mce.h>
25 #include <asm/nmi.h>
26 #include <asm/cacheflush.h>
27 #include <asm/tlbflush.h>
28 #include <asm/insn.h>
29 #include <asm/io.h>
30 #include <asm/fixmap.h>
31 #include <asm/asm-prototypes.h>
32
33 int __read_mostly alternatives_patched;
34
35 EXPORT_SYMBOL_GPL(alternatives_patched);
36
37 #define MAX_PATCH_LEN (255-1)
38
39 static int __initdata_or_module debug_alternative;
40
41 static int __init debug_alt(char *str)
42 {
43 debug_alternative = 1;
44 return 1;
45 }
46 __setup("debug-alternative", debug_alt);
47
48 static int noreplace_smp;
49
50 static int __init setup_noreplace_smp(char *str)
51 {
52 noreplace_smp = 1;
53 return 1;
54 }
55 __setup("noreplace-smp", setup_noreplace_smp);
56
57 #define DPRINTK(fmt, args...) \
58 do { \
59 if (debug_alternative) \
60 printk(KERN_DEBUG pr_fmt(fmt) "\n", ##args); \
61 } while (0)
62
63 #define DUMP_BYTES(buf, len, fmt, args...) \
64 do { \
65 if (unlikely(debug_alternative)) { \
66 int j; \
67 \
68 if (!(len)) \
69 break; \
70 \
71 printk(KERN_DEBUG pr_fmt(fmt), ##args); \
72 for (j = 0; j < (len) - 1; j++) \
73 printk(KERN_CONT "%02hhx ", buf[j]); \
74 printk(KERN_CONT "%02hhx\n", buf[j]); \
75 } \
76 } while (0)
77
78 /*
79 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
80 * that correspond to that nop. Getting from one nop to the next, we
81 * add to the array the offset that is equal to the sum of all sizes of
82 * nops preceding the one we are after.
83 *
84 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
85 * nice symmetry of sizes of the previous nops.
86 */
87 #if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
88 static const unsigned char intelnops[] =
89 {
90 GENERIC_NOP1,
91 GENERIC_NOP2,
92 GENERIC_NOP3,
93 GENERIC_NOP4,
94 GENERIC_NOP5,
95 GENERIC_NOP6,
96 GENERIC_NOP7,
97 GENERIC_NOP8,
98 GENERIC_NOP5_ATOMIC
99 };
100 static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
101 {
102 NULL,
103 intelnops,
104 intelnops + 1,
105 intelnops + 1 + 2,
106 intelnops + 1 + 2 + 3,
107 intelnops + 1 + 2 + 3 + 4,
108 intelnops + 1 + 2 + 3 + 4 + 5,
109 intelnops + 1 + 2 + 3 + 4 + 5 + 6,
110 intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
111 intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
112 };
113 #endif
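/*
 * Editor's note: a minimal sketch (not part of the kernel source) of the
 * cumulative-offset indexing the tables above rely on, so that table[n]
 * points at an n-byte NOP inside one flat byte array. The two encodings
 * used here (0x90 and 0x66 0x90) are standard x86 NOPs.
 */
#if 0	/* illustrative only */
static const unsigned char example_nops[] = {
	0x90,			/* 1-byte NOP */
	0x66, 0x90,		/* 2-byte NOP: operand-size prefix + NOP */
};
static const unsigned char * const example_nops_by_len[] = {
	NULL,			/* there is no 0-byte NOP */
	example_nops,		/* offset 0     -> 1-byte NOP */
	example_nops + 1,	/* offset 0 + 1 -> 2-byte NOP */
};
/* memcpy(dst, example_nops_by_len[2], 2) copies the 2-byte NOP. */
#endif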
114
115 #ifdef K8_NOP1
116 static const unsigned char k8nops[] =
117 {
118 K8_NOP1,
119 K8_NOP2,
120 K8_NOP3,
121 K8_NOP4,
122 K8_NOP5,
123 K8_NOP6,
124 K8_NOP7,
125 K8_NOP8,
126 K8_NOP5_ATOMIC
127 };
128 static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
129 {
130 NULL,
131 k8nops,
132 k8nops + 1,
133 k8nops + 1 + 2,
134 k8nops + 1 + 2 + 3,
135 k8nops + 1 + 2 + 3 + 4,
136 k8nops + 1 + 2 + 3 + 4 + 5,
137 k8nops + 1 + 2 + 3 + 4 + 5 + 6,
138 k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
139 k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
140 };
141 #endif
142
143 #if defined(K7_NOP1) && !defined(CONFIG_X86_64)
144 static const unsigned char k7nops[] =
145 {
146 K7_NOP1,
147 K7_NOP2,
148 K7_NOP3,
149 K7_NOP4,
150 K7_NOP5,
151 K7_NOP6,
152 K7_NOP7,
153 K7_NOP8,
154 K7_NOP5_ATOMIC
155 };
156 static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
157 {
158 NULL,
159 k7nops,
160 k7nops + 1,
161 k7nops + 1 + 2,
162 k7nops + 1 + 2 + 3,
163 k7nops + 1 + 2 + 3 + 4,
164 k7nops + 1 + 2 + 3 + 4 + 5,
165 k7nops + 1 + 2 + 3 + 4 + 5 + 6,
166 k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
167 k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
168 };
169 #endif
170
171 #ifdef P6_NOP1
172 static const unsigned char p6nops[] =
173 {
174 P6_NOP1,
175 P6_NOP2,
176 P6_NOP3,
177 P6_NOP4,
178 P6_NOP5,
179 P6_NOP6,
180 P6_NOP7,
181 P6_NOP8,
182 P6_NOP5_ATOMIC
183 };
184 static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
185 {
186 NULL,
187 p6nops,
188 p6nops + 1,
189 p6nops + 1 + 2,
190 p6nops + 1 + 2 + 3,
191 p6nops + 1 + 2 + 3 + 4,
192 p6nops + 1 + 2 + 3 + 4 + 5,
193 p6nops + 1 + 2 + 3 + 4 + 5 + 6,
194 p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
195 p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
196 };
197 #endif
198
199 /* Initialize these to a safe default */
200 #ifdef CONFIG_X86_64
201 const unsigned char * const *ideal_nops = p6_nops;
202 #else
203 const unsigned char * const *ideal_nops = intel_nops;
204 #endif
205
206 void __init arch_init_ideal_nops(void)
207 {
208 switch (boot_cpu_data.x86_vendor) {
209 case X86_VENDOR_INTEL:
210 /*
211 * Due to a decoder implementation quirk, some
212 * specific Intel CPUs actually perform better with
213 * the "k8_nops" than with the SDM-recommended NOPs.
214 */
215 if (boot_cpu_data.x86 == 6 &&
216 boot_cpu_data.x86_model >= 0x0f &&
217 boot_cpu_data.x86_model != 0x1c &&
218 boot_cpu_data.x86_model != 0x26 &&
219 boot_cpu_data.x86_model != 0x27 &&
220 boot_cpu_data.x86_model < 0x30) {
221 ideal_nops = k8_nops;
222 } else if (boot_cpu_has(X86_FEATURE_NOPL)) {
223 ideal_nops = p6_nops;
224 } else {
225 #ifdef CONFIG_X86_64
226 ideal_nops = k8_nops;
227 #else
228 ideal_nops = intel_nops;
229 #endif
230 }
231 break;
232
233 case X86_VENDOR_HYGON:
234 ideal_nops = p6_nops;
235 return;
236
237 case X86_VENDOR_AMD:
238 if (boot_cpu_data.x86 > 0xf) {
239 ideal_nops = p6_nops;
240 return;
241 }
242
243 fallthrough;
244
245 default:
246 #ifdef CONFIG_X86_64
247 ideal_nops = k8_nops;
248 #else
249 if (boot_cpu_has(X86_FEATURE_K8))
250 ideal_nops = k8_nops;
251 else if (boot_cpu_has(X86_FEATURE_K7))
252 ideal_nops = k7_nops;
253 else
254 ideal_nops = intel_nops;
255 #endif
256 }
257 }
258
259 /* Use this to add nops to a buffer, then text_poke the whole buffer. */
260 static void __init_or_module add_nops(void *insns, unsigned int len)
261 {
262 while (len > 0) {
263 unsigned int noplen = len;
264 if (noplen > ASM_NOP_MAX)
265 noplen = ASM_NOP_MAX;
266 memcpy(insns, ideal_nops[noplen], noplen);
267 insns += noplen;
268 len -= noplen;
269 }
270 }
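/*
 * Editor's note: an illustrative use of add_nops(), assuming ASM_NOP_MAX is
 * 8 as on x86: padding a 12-byte hole emits one 8-byte NOP followed by one
 * 4-byte NOP rather than twelve single-byte NOPs.
 */
#if 0	/* illustrative only */
	u8 pad[12];

	add_nops(pad, sizeof(pad));	/* pad[0..7] = NOP8, pad[8..11] = NOP4 */
#endif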
271
272 extern s32 __retpoline_sites[], __retpoline_sites_end[];
273 extern s32 __return_sites[], __return_sites_end[];
274 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
275 extern s32 __smp_locks[], __smp_locks_end[];
276 void text_poke_early(void *addr, const void *opcode, size_t len);
277
278 /*
279 * Are we looking at a near JMP with a 1 or 4-byte displacement?
280 */
281 static inline bool is_jmp(const u8 opcode)
282 {
283 return opcode == 0xeb || opcode == 0xe9;
284 }
285
286 static void __init_or_module
287 recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insn_buff)
288 {
289 u8 *next_rip, *tgt_rip;
290 s32 n_dspl, o_dspl;
291 int repl_len;
292
293 if (a->replacementlen != 5)
294 return;
295
296 o_dspl = *(s32 *)(insn_buff + 1);
297
298 /* next_rip of the replacement JMP */
299 next_rip = repl_insn + a->replacementlen;
300 /* target rip of the replacement JMP */
301 tgt_rip = next_rip + o_dspl;
302 n_dspl = tgt_rip - orig_insn;
303
304 DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);
305
306 if (tgt_rip - orig_insn >= 0) {
307 if (n_dspl - 2 <= 127)
308 goto two_byte_jmp;
309 else
310 goto five_byte_jmp;
311 /* negative offset */
312 } else {
313 if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
314 goto two_byte_jmp;
315 else
316 goto five_byte_jmp;
317 }
318
319 two_byte_jmp:
320 n_dspl -= 2;
321
322 insn_buff[0] = 0xeb;
323 insn_buff[1] = (s8)n_dspl;
324 add_nops(insn_buff + 2, 3);
325
326 repl_len = 2;
327 goto done;
328
329 five_byte_jmp:
330 n_dspl -= 5;
331
332 insn_buff[0] = 0xe9;
333 *(s32 *)&insn_buff[1] = n_dspl;
334
335 repl_len = 5;
336
337 done:
338
339 DPRINTK("final displ: 0x%08x, JMP 0x%lx",
340 n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
341 }
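/*
 * Editor's note, a worked example with made-up addresses: if the replacement
 * JMP sits at repl_insn = 0x1000 with o_dspl = 0x40, it targets
 * next_rip + o_dspl = 0x1005 + 0x40 = 0x1045. Copied to orig_insn = 0x1030,
 * the new displacement is n_dspl = 0x1045 - 0x1030 = 0x15; since
 * 0x15 - 2 <= 127 it is shrunk to the 2-byte form "eb 13" and the remaining
 * three bytes are padded with a 3-byte NOP.
 */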
342
343 /*
344 * optimize_nops_range() - Optimize a sequence of single byte NOPs (0x90)
345 *
346 * @instr: instruction byte stream
347 * @instrlen: length of the above
348 * @off: offset within @instr where the first NOP has been detected
349 *
350 * Return: number of NOPs found (and replaced).
351 */
352 static __always_inline int optimize_nops_range(u8 *instr, u8 instrlen, int off)
353 {
354 unsigned long flags;
355 int i = off, nnops;
356
357 while (i < instrlen) {
358 if (instr[i] != 0x90)
359 break;
360
361 i++;
362 }
363
364 nnops = i - off;
365
366 if (nnops <= 1)
367 return nnops;
368
369 local_irq_save(flags);
370 add_nops(instr + off, nnops);
371 local_irq_restore(flags);
372
373 DUMP_BYTES(instr, instrlen, "%px: [%d:%d) optimized NOPs: ", instr, off, i);
374
375 return nnops;
376 }
377
378 /*
379 * "noinline" to cause control flow change and thus invalidate I$ and
380 * cause refetch after modification.
381 */
382 static void __init_or_module noinline optimize_nops(u8 *instr, size_t len)
383 {
384 struct insn insn;
385 int i = 0;
386
387 /*
388 * Jump over the non-NOP insns and optimize single-byte NOPs into bigger
389 * ones.
390 */
391 for (;;) {
392 if (insn_decode_kernel(&insn, &instr[i]))
393 return;
394
395 /*
396 * See if this and any potentially following NOPs can be
397 * optimized.
398 */
399 if (insn.length == 1 && insn.opcode.bytes[0] == 0x90)
400 i += optimize_nops_range(instr, len, i);
401 else
402 i += insn.length;
403
404 if (i >= len)
405 return;
406 }
407 }
408
409 /*
410 * Replace instructions with better alternatives for this CPU type. This runs
411 * before SMP is initialized to avoid SMP problems with self modifying code.
412 * This implies that asymmetric systems where APs have fewer capabilities than
413 * the boot processor are not handled. Tough. Make sure you disable such
414 * features by hand.
415 *
416 * Marked "noinline" to cause control flow change and thus insn cache
417 * to refetch changed I$ lines.
418 */
419 void __init_or_module noinline apply_alternatives(struct alt_instr *start,
420 struct alt_instr *end)
421 {
422 struct alt_instr *a;
423 u8 *instr, *replacement;
424 u8 insn_buff[MAX_PATCH_LEN];
425
426 DPRINTK("alt table %px, -> %px", start, end);
427 /*
428 * The scan order should be from start to end. A later scanned
429 * alternative code can overwrite previously scanned alternative code.
430 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
431 * patch code.
432 *
433 * So be careful if you want to change the scan order to any other
434 * order.
435 */
436 for (a = start; a < end; a++) {
437 int insn_buff_sz = 0;
438 /* Mask away "NOT" flag bit for feature to test. */
439 u16 feature = a->cpuid & ~ALTINSTR_FLAG_INV;
440
441 instr = (u8 *)&a->instr_offset + a->instr_offset;
442 replacement = (u8 *)&a->repl_offset + a->repl_offset;
443 BUG_ON(a->instrlen > sizeof(insn_buff));
444 BUG_ON(feature >= (NCAPINTS + NBUGINTS) * 32);
445
446 /*
447 * Patch if either:
448 * - feature is present
449 * - feature is not present but ALTINSTR_FLAG_INV is set, meaning:
450 * patch if the feature is *NOT* present.
451 */
452 if (!boot_cpu_has(feature) == !(a->cpuid & ALTINSTR_FLAG_INV))
453 goto next;
454
455 DPRINTK("feat: %s%d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d)",
456 (a->cpuid & ALTINSTR_FLAG_INV) ? "!" : "",
457 feature >> 5,
458 feature & 0x1f,
459 instr, instr, a->instrlen,
460 replacement, a->replacementlen);
461
462 DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
463 DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
464
465 memcpy(insn_buff, replacement, a->replacementlen);
466 insn_buff_sz = a->replacementlen;
467
468 /*
469 * 0xe8 is a relative jump; fix the offset.
470 *
471 * Instruction length is checked before the opcode to avoid
472 * accessing uninitialized bytes for zero-length replacements.
473 */
474 if (a->replacementlen == 5 && *insn_buff == 0xe8) {
475 *(s32 *)(insn_buff + 1) += replacement - instr;
476 DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
477 *(s32 *)(insn_buff + 1),
478 (unsigned long)instr + *(s32 *)(insn_buff + 1) + 5);
479 }
480
481 if (a->replacementlen && is_jmp(replacement[0]))
482 recompute_jump(a, instr, replacement, insn_buff);
483
484 for (; insn_buff_sz < a->instrlen; insn_buff_sz++)
485 insn_buff[insn_buff_sz] = 0x90;
486
487 DUMP_BYTES(insn_buff, insn_buff_sz, "%px: final_insn: ", instr);
488
489 text_poke_early(instr, insn_buff, insn_buff_sz);
490
491 next:
492 optimize_nops(instr, a->instrlen);
493 }
494 }
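/*
 * Editor's note: a minimal sketch (field name is hypothetical) of the
 * self-relative offsets decoded above; each struct alt_instr stores its
 * patch site and replacement as s32 offsets relative to the offset field
 * itself, so the table stays position-independent.
 */
#if 0	/* illustrative only */
struct rel_ref {
	s32 offset;				/* target, relative to this field */
};

static void *rel_ref_target(struct rel_ref *r)
{
	return (u8 *)&r->offset + r->offset;	/* same arithmetic as above */
}
#endif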
495
496 #if defined(CONFIG_RETPOLINE) && defined(CONFIG_STACK_VALIDATION)
497
498 /*
499 * CALL/JMP *%\reg
500 */
501 static int emit_indirect(int op, int reg, u8 *bytes)
502 {
503 int i = 0;
504 u8 modrm;
505
506 switch (op) {
507 case CALL_INSN_OPCODE:
508 modrm = 0x10; /* Reg = 2; CALL r/m */
509 break;
510
511 case JMP32_INSN_OPCODE:
512 modrm = 0x20; /* Reg = 4; JMP r/m */
513 break;
514
515 default:
516 WARN_ON_ONCE(1);
517 return -1;
518 }
519
520 if (reg >= 8) {
521 bytes[i++] = 0x41; /* REX.B prefix */
522 reg -= 8;
523 }
524
525 modrm |= 0xc0; /* Mod = 3 */
526 modrm += reg;
527
528 bytes[i++] = 0xff; /* opcode */
529 bytes[i++] = modrm;
530
531 return i;
532 }
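/*
 * Editor's note, a worked example: emit_indirect(CALL_INSN_OPCODE, 11, bytes)
 * encodes "call *%r11". Register 11 needs the REX.B prefix (0x41) and leaves
 * 3 for the ModRM r/m field, so modrm = 0x10 | 0xc0 | 3 = 0xd3 and the
 * emitted bytes are 41 ff d3, with a return value of 3.
 */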
533
534 /*
535 * Rewrite the compiler generated retpoline thunk calls.
536 *
537 * For spectre_v2=off (!X86_FEATURE_RETPOLINE), rewrite them into immediate
538 * indirect instructions, avoiding the extra indirection.
539 *
540 * For example, convert:
541 *
542 * CALL __x86_indirect_thunk_\reg
543 *
544 * into:
545 *
546 * CALL *%\reg
547 *
548 */
549 static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
550 {
551 retpoline_thunk_t *target;
552 int reg, i = 0;
553
554 target = addr + insn->length + insn->immediate.value;
555 reg = target - __x86_indirect_thunk_array;
556
557 if (WARN_ON_ONCE(reg & ~0xf))
558 return -1;
559
560 /* If anyone ever does: CALL/JMP *%rsp, we're in deep trouble. */
561 BUG_ON(reg == 4);
562
563 if (cpu_feature_enabled(X86_FEATURE_RETPOLINE))
564 return -1;
565
566 i = emit_indirect(insn->opcode.bytes[0], reg, bytes);
567 if (i < 0)
568 return i;
569
570 for (; i < insn->length;)
571 bytes[i++] = 0x90;
572
573 return i;
574 }
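/*
 * Editor's note, a worked example: a compiler-generated
 * "call __x86_indirect_thunk_rax" is a 5-byte e8 <rel32>. With retpolines
 * disabled, patch_retpoline() rewrites it as "ff d0" (call *%rax) followed
 * by three 0x90 padding bytes, so the result is exactly insn->length bytes
 * and can be copied straight over the old instruction.
 */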
575
576 /*
577 * Generated by 'objtool --retpoline'.
578 */
579 void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
580 {
581 s32 *s;
582
583 for (s = start; s < end; s++) {
584 void *addr = (void *)s + *s;
585 struct insn insn;
586 int len, ret;
587 u8 bytes[16];
588 u8 op1, op2;
589
590 ret = insn_decode_kernel(&insn, addr);
591 if (WARN_ON_ONCE(ret < 0))
592 continue;
593
594 op1 = insn.opcode.bytes[0];
595 op2 = insn.opcode.bytes[1];
596
597 switch (op1) {
598 case CALL_INSN_OPCODE:
599 case JMP32_INSN_OPCODE:
600 break;
601
602 default:
603 WARN_ON_ONCE(1);
604 continue;
605 }
606
607 len = patch_retpoline(addr, &insn, bytes);
608 if (len == insn.length) {
609 optimize_nops(bytes, len);
610 text_poke_early(addr, bytes, len);
611 }
612 }
613 }
614
615 #ifdef CONFIG_RETHUNK
616 /*
617 * Rewrite the compiler generated return thunk tail-calls.
618 *
619 * For example, convert:
620 *
621 * JMP __x86_return_thunk
622 *
623 * into:
624 *
625 * RET
626 */
627 static int patch_return(void *addr, struct insn *insn, u8 *bytes)
628 {
629 int i = 0;
630
631 if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
632 return -1;
633
634 bytes[i++] = RET_INSN_OPCODE;
635
636 for (; i < insn->length;)
637 bytes[i++] = INT3_INSN_OPCODE;
638
639 return i;
640 }
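/*
 * Editor's note, a worked example: a tail-call "jmp __x86_return_thunk" is a
 * 5-byte e9 <rel32>. With X86_FEATURE_RETHUNK disabled, patch_return()
 * replaces it with c3 (RET) followed by four cc (INT3) bytes, again keeping
 * the patched length equal to the original instruction length.
 */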
641
642 void __init_or_module noinline apply_returns(s32 *start, s32 *end)
643 {
644 s32 *s;
645
646 for (s = start; s < end; s++) {
647 void *dest = NULL, *addr = (void *)s + *s;
648 struct insn insn;
649 int len, ret;
650 u8 bytes[16];
651 u8 op;
652
653 ret = insn_decode_kernel(&insn, addr);
654 if (WARN_ON_ONCE(ret < 0))
655 continue;
656
657 op = insn.opcode.bytes[0];
658 if (op == JMP32_INSN_OPCODE)
659 dest = addr + insn.length + insn.immediate.value;
660
661 if (__static_call_fixup(addr, op, dest) ||
662 WARN_ON_ONCE(dest != &__x86_return_thunk))
663 continue;
664
665 DPRINTK("return thunk at: %pS (%px) len: %d to: %pS",
666 addr, addr, insn.length,
667 addr + insn.length + insn.immediate.value);
668
669 len = patch_return(addr, &insn, bytes);
670 if (len == insn.length) {
671 DUMP_BYTES(((u8*)addr), len, "%px: orig: ", addr);
672 DUMP_BYTES(((u8*)bytes), len, "%px: repl: ", addr);
673 text_poke_early(addr, bytes, len);
674 }
675 }
676 }
677 #else
678 void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
679 #endif /* CONFIG_RETHUNK */
680
681 #else /* !RETPOLINES || !CONFIG_STACK_VALIDATION */
682
683 void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) { }
684 void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
685
686 #endif /* CONFIG_RETPOLINE && CONFIG_STACK_VALIDATION */
687
688 #ifdef CONFIG_SMP
689 static void alternatives_smp_lock(const s32 *start, const s32 *end,
690 u8 *text, u8 *text_end)
691 {
692 const s32 *poff;
693
694 for (poff = start; poff < end; poff++) {
695 u8 *ptr = (u8 *)poff + *poff;
696
697 if (!*poff || ptr < text || ptr >= text_end)
698 continue;
699 /* turn DS segment override prefix into lock prefix */
700 if (*ptr == 0x3e)
701 text_poke(ptr, ((unsigned char []){0xf0}), 1);
702 }
703 }
704
705 static void alternatives_smp_unlock(const s32 *start, const s32 *end,
706 u8 *text, u8 *text_end)
707 {
708 const s32 *poff;
709
710 for (poff = start; poff < end; poff++) {
711 u8 *ptr = (u8 *)poff + *poff;
712
713 if (!*poff || ptr < text || ptr >= text_end)
714 continue;
715 /* turn lock prefix into DS segment override prefix */
716 if (*ptr == 0xf0)
717 text_poke(ptr, ((unsigned char []){0x3E}), 1);
718 }
719 }
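/*
 * Editor's note, an illustrative byte-level view of the prefix flip above:
 *
 *	f0 0f b3 37	lock btr %esi,(%rdi)	(SMP)
 *	3e 0f b3 37	ds   btr %esi,(%rdi)	(UP; the DS override is a no-op)
 *
 * Only the first byte of the recorded site ever changes, so the patch is a
 * single-byte text_poke() in either direction.
 */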
720
721 struct smp_alt_module {
722 /* what is this ??? */
723 struct module *mod;
724 char *name;
725
726 /* ptrs to lock prefixes */
727 const s32 *locks;
728 const s32 *locks_end;
729
730 /* .text segment, needed to avoid patching init code ;) */
731 u8 *text;
732 u8 *text_end;
733
734 struct list_head next;
735 };
736 static LIST_HEAD(smp_alt_modules);
737 static bool uniproc_patched = false; /* protected by text_mutex */
738
739 void __init_or_module alternatives_smp_module_add(struct module *mod,
740 char *name,
741 void *locks, void *locks_end,
742 void *text, void *text_end)
743 {
744 struct smp_alt_module *smp;
745
746 mutex_lock(&text_mutex);
747 if (!uniproc_patched)
748 goto unlock;
749
750 if (num_possible_cpus() == 1)
751 /* Don't bother remembering, we'll never have to undo it. */
752 goto smp_unlock;
753
754 smp = kzalloc(sizeof(*smp), GFP_KERNEL);
755 if (NULL == smp)
756 /* we'll run the (safe but slow) SMP code then ... */
757 goto unlock;
758
759 smp->mod = mod;
760 smp->name = name;
761 smp->locks = locks;
762 smp->locks_end = locks_end;
763 smp->text = text;
764 smp->text_end = text_end;
765 DPRINTK("locks %p -> %p, text %p -> %p, name %s\n",
766 smp->locks, smp->locks_end,
767 smp->text, smp->text_end, smp->name);
768
769 list_add_tail(&smp->next, &smp_alt_modules);
770 smp_unlock:
771 alternatives_smp_unlock(locks, locks_end, text, text_end);
772 unlock:
773 mutex_unlock(&text_mutex);
774 }
775
776 void __init_or_module alternatives_smp_module_del(struct module *mod)
777 {
778 struct smp_alt_module *item;
779
780 mutex_lock(&text_mutex);
781 list_for_each_entry(item, &smp_alt_modules, next) {
782 if (mod != item->mod)
783 continue;
784 list_del(&item->next);
785 kfree(item);
786 break;
787 }
788 mutex_unlock(&text_mutex);
789 }
790
791 void alternatives_enable_smp(void)
792 {
793 struct smp_alt_module *mod;
794
795 /* Why bother if there are no other CPUs? */
796 BUG_ON(num_possible_cpus() == 1);
797
798 mutex_lock(&text_mutex);
799
800 if (uniproc_patched) {
801 pr_info("switching to SMP code\n");
802 BUG_ON(num_online_cpus() != 1);
803 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
804 clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
805 list_for_each_entry(mod, &smp_alt_modules, next)
806 alternatives_smp_lock(mod->locks, mod->locks_end,
807 mod->text, mod->text_end);
808 uniproc_patched = false;
809 }
810 mutex_unlock(&text_mutex);
811 }
812
813 /*
814 * Return 1 if the address range is reserved for SMP-alternatives.
815 * Must hold text_mutex.
816 */
817 int alternatives_text_reserved(void *start, void *end)
818 {
819 struct smp_alt_module *mod;
820 const s32 *poff;
821 u8 *text_start = start;
822 u8 *text_end = end;
823
824 lockdep_assert_held(&text_mutex);
825
826 list_for_each_entry(mod, &smp_alt_modules, next) {
827 if (mod->text > text_end || mod->text_end < text_start)
828 continue;
829 for (poff = mod->locks; poff < mod->locks_end; poff++) {
830 const u8 *ptr = (const u8 *)poff + *poff;
831
832 if (text_start <= ptr && text_end > ptr)
833 return 1;
834 }
835 }
836
837 return 0;
838 }
839 #endif /* CONFIG_SMP */
840
841 #ifdef CONFIG_PARAVIRT
842 void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
843 struct paravirt_patch_site *end)
844 {
845 struct paravirt_patch_site *p;
846 char insn_buff[MAX_PATCH_LEN];
847
848 for (p = start; p < end; p++) {
849 unsigned int used;
850
851 BUG_ON(p->len > MAX_PATCH_LEN);
852 /* prep the buffer with the original instructions */
853 memcpy(insn_buff, p->instr, p->len);
854 used = pv_ops.init.patch(p->type, insn_buff, (unsigned long)p->instr, p->len);
855
856 BUG_ON(used > p->len);
857
858 /* Pad the rest with nops */
859 add_nops(insn_buff + used, p->len - used);
860 text_poke_early(p->instr, insn_buff, p->len);
861 }
862 }
863 extern struct paravirt_patch_site __start_parainstructions[],
864 __stop_parainstructions[];
865 #endif /* CONFIG_PARAVIRT */
866
867 /*
868 * Self-test for the INT3 based CALL emulation code.
869 *
870 * This exercises int3_emulate_call() to make sure INT3 pt_regs are set up
871 * properly and that there is a stack gap between the INT3 frame and the
872 * previous context. Without this gap doing a virtual PUSH on the interrupted
873 * stack would corrupt the INT3 IRET frame.
874 *
875 * See entry_{32,64}.S for more details.
876 */
877
878 /*
879 * We define the int3_magic() function in assembly to control the calling
880 * convention such that we can 'call' it from assembly.
881 */
882
883 extern void int3_magic(unsigned int *ptr); /* defined in asm */
884
885 asm (
886 " .pushsection .init.text, \"ax\", @progbits\n"
887 " .type int3_magic, @function\n"
888 "int3_magic:\n"
889 " movl $1, (%" _ASM_ARG1 ")\n"
890 ASM_RET
891 " .size int3_magic, .-int3_magic\n"
892 " .popsection\n"
893 );
894
895 extern __initdata unsigned long int3_selftest_ip; /* defined in asm below */
896
897 static int __init
898 int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
899 {
900 struct die_args *args = data;
901 struct pt_regs *regs = args->regs;
902
903 if (!regs || user_mode(regs))
904 return NOTIFY_DONE;
905
906 if (val != DIE_INT3)
907 return NOTIFY_DONE;
908
909 if (regs->ip - INT3_INSN_SIZE != int3_selftest_ip)
910 return NOTIFY_DONE;
911
912 int3_emulate_call(regs, (unsigned long)&int3_magic);
913 return NOTIFY_STOP;
914 }
915
916 static void __init int3_selftest(void)
917 {
918 static __initdata struct notifier_block int3_exception_nb = {
919 .notifier_call = int3_exception_notify,
920 .priority = INT_MAX-1, /* last */
921 };
922 unsigned int val = 0;
923
924 BUG_ON(register_die_notifier(&int3_exception_nb));
925
926 /*
927 * Basically: int3_magic(&val); but really complicated :-)
928 *
929 * Stick the address of the INT3 instruction into int3_selftest_ip,
930 * then trigger the INT3, padded with NOPs to match a CALL instruction
931 * length.
932 */
933 asm volatile ("1: int3; nop; nop; nop; nop\n\t"
934 ".pushsection .init.data,\"aw\"\n\t"
935 ".align " __ASM_SEL(4, 8) "\n\t"
936 ".type int3_selftest_ip, @object\n\t"
937 ".size int3_selftest_ip, " __ASM_SEL(4, 8) "\n\t"
938 "int3_selftest_ip:\n\t"
939 __ASM_SEL(.long, .quad) " 1b\n\t"
940 ".popsection\n\t"
941 : ASM_CALL_CONSTRAINT
942 : __ASM_SEL_RAW(a, D) (&val)
943 : "memory");
944
945 BUG_ON(val != 1);
946
947 unregister_die_notifier(&int3_exception_nb);
948 }
949
950 void __init alternative_instructions(void)
951 {
952 int3_selftest();
953
954 /*
955 * The patching is not fully atomic, so try to avoid local
956 * interruptions that might execute the code being patched.
957 * Other CPUs are not running.
958 */
959 stop_nmi();
960
961 /*
962 * Don't stop machine check exceptions while patching.
963 * MCEs only happen when something got corrupted and in this
964 * case we must do something about the corruption.
965 * Ignoring it is worse than an unlikely patching race.
966 * Also machine checks tend to be broadcast and if one CPU
967 * goes into machine check the others follow quickly, so we don't
968 * expect a machine check to cause undue problems during code
969 * patching.
970 */
971
972 /*
973 * Rewrite the retpolines, must be done before alternatives since
974 * those can rewrite the retpoline thunks.
975 */
976 apply_retpolines(__retpoline_sites, __retpoline_sites_end);
977 apply_returns(__return_sites, __return_sites_end);
978
979 apply_alternatives(__alt_instructions, __alt_instructions_end);
980
981 #ifdef CONFIG_SMP
982 /* Patch to UP if other cpus not imminent. */
983 if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
984 uniproc_patched = true;
985 alternatives_smp_module_add(NULL, "core kernel",
986 __smp_locks, __smp_locks_end,
987 _text, _etext);
988 }
989
990 if (!uniproc_patched || num_possible_cpus() == 1) {
991 free_init_pages("SMP alternatives",
992 (unsigned long)__smp_locks,
993 (unsigned long)__smp_locks_end);
994 }
995 #endif
996
997 apply_paravirt(__parainstructions, __parainstructions_end);
998
999 restart_nmi();
1000 alternatives_patched = 1;
1001 }
1002
1003 /**
1004 * text_poke_early - Update instructions on a live kernel at boot time
1005 * @addr: address to modify
1006 * @opcode: source of the copy
1007 * @len: length to copy
1008 *
1009 * When you use this code to patch more than one byte of an instruction
1010 * you need to make sure that other CPUs cannot execute this code in parallel.
1011 * Also no thread must be currently preempted in the middle of these
1012 * instructions. And on the local CPU you need to be protected against NMI or
1013 * MCE handlers seeing an inconsistent instruction while you patch.
1014 */
1015 void __init_or_module text_poke_early(void *addr, const void *opcode,
1016 size_t len)
1017 {
1018 unsigned long flags;
1019
1020 if (boot_cpu_has(X86_FEATURE_NX) &&
1021 is_module_text_address((unsigned long)addr)) {
1022 /*
1023 * Modules text is marked initially as non-executable, so the
1024 * code cannot be running and speculative code-fetches are
1025 * prevented. Just change the code.
1026 */
1027 memcpy(addr, opcode, len);
1028 } else {
1029 local_irq_save(flags);
1030 memcpy(addr, opcode, len);
1031 local_irq_restore(flags);
1032 sync_core();
1033
1034 /*
1035 * Could also do a CLFLUSH here to speed up CPU recovery; but
1036 * that causes hangs on some VIA CPUs.
1037 */
1038 }
1039 }
1040
1041 typedef struct {
1042 struct mm_struct *mm;
1043 } temp_mm_state_t;
1044
1045 /*
1046 * Using a temporary mm allows setting up temporary mappings that are not accessible
1047 * by other CPUs. Such mappings are needed to perform sensitive memory writes
1048 * that override the kernel memory protections (e.g., W^X), without exposing the
1049 * temporary page-table mappings that are required for these write operations to
1050 * other CPUs. Using a temporary mm also avoids TLB shootdowns when the
1051 * mapping is torn down.
1052 *
1053 * Context: The temporary mm needs to be used exclusively by a single core. To
1054 * harden security IRQs must be disabled while the temporary mm is
1055 * loaded, thereby preventing interrupt handler bugs from overriding
1056 * the kernel memory protection.
1057 */
1058 static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
1059 {
1060 temp_mm_state_t temp_state;
1061
1062 lockdep_assert_irqs_disabled();
1063
1064 /*
1065 * Make sure not to be in TLB lazy mode, as otherwise we'll end up
1066 * with a stale address space WITHOUT being in lazy mode after
1067 * restoring the previous mm.
1068 */
1069 if (this_cpu_read(cpu_tlbstate.is_lazy))
1070 leave_mm(smp_processor_id());
1071
1072 temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
1073 switch_mm_irqs_off(NULL, mm, current);
1074
1075 /*
1076 * If breakpoints are enabled, disable them while the temporary mm is
1077 * used. Userspace might set up watchpoints on addresses that are used
1078 * in the temporary mm, which would lead to wrong signals being sent or
1079 * crashes.
1080 *
1081 * Note that breakpoints are not disabled selectively, which also causes
1082 * kernel breakpoints (e.g., perf's) to be disabled. This might be
1083 * undesirable, but still seems reasonable as the code that runs in the
1084 * temporary mm should be short.
1085 */
1086 if (hw_breakpoint_active())
1087 hw_breakpoint_disable();
1088
1089 return temp_state;
1090 }
1091
1092 static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
1093 {
1094 lockdep_assert_irqs_disabled();
1095 switch_mm_irqs_off(NULL, prev_state.mm, current);
1096
1097 /*
1098 * Restore the breakpoints if they were disabled before the temporary mm
1099 * was loaded.
1100 */
1101 if (hw_breakpoint_active())
1102 hw_breakpoint_restore();
1103 }
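/*
 * Editor's note: a minimal usage sketch of the pair above; it is meant to
 * bracket a short, IRQ-off critical section, as __text_poke() does below.
 */
#if 0	/* illustrative only */
	temp_mm_state_t prev;
	unsigned long flags;

	local_irq_save(flags);
	prev = use_temporary_mm(poking_mm);	/* switch to the patching mm */
	/* ... write through the temporary mapping ... */
	unuse_temporary_mm(prev);		/* restore the previous mm */
	local_irq_restore(flags);
#endif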
1104
1105 __ro_after_init struct mm_struct *poking_mm;
1106 __ro_after_init unsigned long poking_addr;
1107
1108 static void *__text_poke(void *addr, const void *opcode, size_t len)
1109 {
1110 bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
1111 struct page *pages[2] = {NULL};
1112 temp_mm_state_t prev;
1113 unsigned long flags;
1114 pte_t pte, *ptep;
1115 spinlock_t *ptl;
1116 pgprot_t pgprot;
1117
1118 /*
1119 * While the boot memory allocator is running we cannot use struct pages as
1120 * they are not yet initialized. There is no way to recover.
1121 */
1122 BUG_ON(!after_bootmem);
1123
1124 if (!core_kernel_text((unsigned long)addr)) {
1125 pages[0] = vmalloc_to_page(addr);
1126 if (cross_page_boundary)
1127 pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
1128 } else {
1129 pages[0] = virt_to_page(addr);
1130 WARN_ON(!PageReserved(pages[0]));
1131 if (cross_page_boundary)
1132 pages[1] = virt_to_page(addr + PAGE_SIZE);
1133 }
1134 /*
1135 * If something went wrong, crash and burn since recovery paths are not
1136 * implemented.
1137 */
1138 BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));
1139
1140 /*
1141 * Map the page without the global bit, as TLB flushing is done with
1142 * flush_tlb_mm_range(), which is intended for non-global PTEs.
1143 */
1144 pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);
1145
1146 /*
1147 * The lock is not really needed, but this allows us to avoid open-coding.
1148 */
1149 ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
1150
1151 /*
1152 * This must not fail; preallocated in poking_init().
1153 */
1154 VM_BUG_ON(!ptep);
1155
1156 local_irq_save(flags);
1157
1158 pte = mk_pte(pages[0], pgprot);
1159 set_pte_at(poking_mm, poking_addr, ptep, pte);
1160
1161 if (cross_page_boundary) {
1162 pte = mk_pte(pages[1], pgprot);
1163 set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
1164 }
1165
1166 /*
1167 * Loading the temporary mm behaves as a compiler barrier, which
1168 * guarantees that the PTE will be set at the time memcpy() is done.
1169 */
1170 prev = use_temporary_mm(poking_mm);
1171
1172 kasan_disable_current();
1173 memcpy((u8 *)poking_addr + offset_in_page(addr), opcode, len);
1174 kasan_enable_current();
1175
1176 /*
1177 * Ensure that the PTE is only cleared after the instructions of memcpy
1178 * were issued by using a compiler barrier.
1179 */
1180 barrier();
1181
1182 pte_clear(poking_mm, poking_addr, ptep);
1183 if (cross_page_boundary)
1184 pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);
1185
1186 /*
1187 * Loading the previous page-table hierarchy requires a serializing
1188 * instruction that already allows the core to see the updated version.
1189 * Xen-PV is assumed to serialize execution in a similar manner.
1190 */
1191 unuse_temporary_mm(prev);
1192
1193 /*
1194 * Flushing the TLB might involve IPIs, which would require enabled
1195 * IRQs, but not if the mm is not used, as is the case at this point.
1196 */
1197 flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
1198 (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
1199 PAGE_SHIFT, false);
1200
1201 /*
1202 * If the text does not match what we just wrote then something is
1203 * fundamentally screwy; there's nothing we can really do about that.
1204 */
1205 BUG_ON(memcmp(addr, opcode, len));
1206
1207 local_irq_restore(flags);
1208 pte_unmap_unlock(ptep, ptl);
1209 return addr;
1210 }
1211
1212 /**
1213 * text_poke - Update instructions on a live kernel
1214 * @addr: address to modify
1215 * @opcode: source of the copy
1216 * @len: length to copy
1217 *
1218 * Only atomic text poke/set should be allowed when not doing early patching.
1219 * It means the size must be writable atomically and the address must be aligned
1220 * in a way that permits an atomic write. It also makes sure we fit on a single
1221 * page.
1222 *
1223 * Note that the caller must ensure that if the modified code is part of a
1224 * module, the module would not be removed during poking. This can be achieved
1225 * by registering a module notifier, and ordering module removal and patching
1226 * through a mutex.
1227 */
1228 void *text_poke(void *addr, const void *opcode, size_t len)
1229 {
1230 lockdep_assert_held(&text_mutex);
1231
1232 return __text_poke(addr, opcode, len);
1233 }
1234
1235 /**
1236 * text_poke_kgdb - Update instructions on a live kernel by kgdb
1237 * @addr: address to modify
1238 * @opcode: source of the copy
1239 * @len: length to copy
1240 *
1241 * Only atomic text poke/set should be allowed when not doing early patching.
1242 * It means the size must be writable atomically and the address must be aligned
1243 * in a way that permits an atomic write. It also makes sure we fit on a single
1244 * page.
1245 *
1246 * Context: should only be used by kgdb, which ensures no other core is running,
1247 * despite the fact it does not hold the text_mutex.
1248 */
1249 void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
1250 {
1251 return __text_poke(addr, opcode, len);
1252 }
1253
1254 static void do_sync_core(void *info)
1255 {
1256 sync_core();
1257 }
1258
1259 void text_poke_sync(void)
1260 {
1261 on_each_cpu(do_sync_core, NULL, 1);
1262 }
1263
1264 struct text_poke_loc {
1265 /* addr := _stext + rel_addr */
1266 s32 rel_addr;
1267 s32 disp;
1268 u8 len;
1269 u8 opcode;
1270 const u8 text[POKE_MAX_OPCODE_SIZE];
1271 /* see text_poke_bp_batch() */
1272 u8 old;
1273 };
1274
1275 struct bp_patching_desc {
1276 struct text_poke_loc *vec;
1277 int nr_entries;
1278 atomic_t refs;
1279 };
1280
1281 static struct bp_patching_desc *bp_desc;
1282
1283 static __always_inline
1284 struct bp_patching_desc *try_get_desc(struct bp_patching_desc **descp)
1285 {
1286 /* rcu_dereference */
1287 struct bp_patching_desc *desc = __READ_ONCE(*descp);
1288
1289 if (!desc || !arch_atomic_inc_not_zero(&desc->refs))
1290 return NULL;
1291
1292 return desc;
1293 }
1294
1295 static __always_inline void put_desc(struct bp_patching_desc *desc)
1296 {
1297 smp_mb__before_atomic();
1298 arch_atomic_dec(&desc->refs);
1299 }
1300
1301 static __always_inline void *text_poke_addr(struct text_poke_loc *tp)
1302 {
1303 return _stext + tp->rel_addr;
1304 }
1305
1306 static __always_inline int patch_cmp(const void *key, const void *elt)
1307 {
1308 struct text_poke_loc *tp = (struct text_poke_loc *) elt;
1309
1310 if (key < text_poke_addr(tp))
1311 return -1;
1312 if (key > text_poke_addr(tp))
1313 return 1;
1314 return 0;
1315 }
1316
1317 noinstr int poke_int3_handler(struct pt_regs *regs)
1318 {
1319 struct bp_patching_desc *desc;
1320 struct text_poke_loc *tp;
1321 int ret = 0;
1322 void *ip;
1323
1324 if (user_mode(regs))
1325 return 0;
1326
1327 /*
1328 * Having observed our INT3 instruction, we now must observe
1329 * bp_desc:
1330 *
1331 * bp_desc = desc INT3
1332 * WMB RMB
1333 * write INT3 if (desc)
1334 */
1335 smp_rmb();
1336
1337 desc = try_get_desc(&bp_desc);
1338 if (!desc)
1339 return 0;
1340
1341 /*
1342 * Discount the INT3. See text_poke_bp_batch().
1343 */
1344 ip = (void *) regs->ip - INT3_INSN_SIZE;
1345
1346 /*
1347 * Skip the binary search if there is a single member in the vector.
1348 */
1349 if (unlikely(desc->nr_entries > 1)) {
1350 tp = __inline_bsearch(ip, desc->vec, desc->nr_entries,
1351 sizeof(struct text_poke_loc),
1352 patch_cmp);
1353 if (!tp)
1354 goto out_put;
1355 } else {
1356 tp = desc->vec;
1357 if (text_poke_addr(tp) != ip)
1358 goto out_put;
1359 }
1360
1361 ip += tp->len;
1362
1363 switch (tp->opcode) {
1364 case INT3_INSN_OPCODE:
1365 /*
1366 * Someone poked an explicit INT3, they'll want to handle it,
1367 * do not consume.
1368 */
1369 goto out_put;
1370
1371 case RET_INSN_OPCODE:
1372 int3_emulate_ret(regs);
1373 break;
1374
1375 case CALL_INSN_OPCODE:
1376 int3_emulate_call(regs, (long)ip + tp->disp);
1377 break;
1378
1379 case JMP32_INSN_OPCODE:
1380 case JMP8_INSN_OPCODE:
1381 int3_emulate_jmp(regs, (long)ip + tp->disp);
1382 break;
1383
1384 default:
1385 BUG();
1386 }
1387
1388 ret = 1;
1389
1390 out_put:
1391 put_desc(desc);
1392 return ret;
1393 }
1394
1395 #define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
1396 static struct text_poke_loc tp_vec[TP_VEC_MAX];
1397 static int tp_vec_nr;
1398
1399 /**
1400 * text_poke_bp_batch() -- update instructions on live kernel on SMP
1401 * @tp: vector of instructions to patch
1402 * @nr_entries: number of entries in the vector
1403 *
1404 * Modify multi-byte instruction by using int3 breakpoint on SMP.
1405 * We completely avoid stop_machine() here, and achieve the
1406 * synchronization using int3 breakpoint.
1407 *
1408 * The way it is done:
1409 * - For each entry in the vector:
1410 * - add an int3 trap to the address that will be patched
1411 * - sync cores
1412 * - For each entry in the vector:
1413 * - update all but the first byte of the patched range
1414 * - sync cores
1415 * - For each entry in the vector:
1416 * - replace the first byte (int3) by the first byte of
1417 * replacing opcode
1418 * - sync cores
1419 */
1420 static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
1421 {
1422 struct bp_patching_desc desc = {
1423 .vec = tp,
1424 .nr_entries = nr_entries,
1425 .refs = ATOMIC_INIT(1),
1426 };
1427 unsigned char int3 = INT3_INSN_OPCODE;
1428 unsigned int i;
1429 int do_sync;
1430
1431 lockdep_assert_held(&text_mutex);
1432
1433 smp_store_release(&bp_desc, &desc); /* rcu_assign_pointer */
1434
1435 /*
1436 * Corresponding read barrier in int3 notifier for making sure the
1437 * nr_entries and handler are correctly ordered wrt. patching.
1438 */
1439 smp_wmb();
1440
1441 /*
1442 * First step: add an int3 trap to the address that will be patched.
1443 */
1444 for (i = 0; i < nr_entries; i++) {
1445 tp[i].old = *(u8 *)text_poke_addr(&tp[i]);
1446 text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE);
1447 }
1448
1449 text_poke_sync();
1450
1451 /*
1452 * Second step: update all but the first byte of the patched range.
1453 */
1454 for (do_sync = 0, i = 0; i < nr_entries; i++) {
1455 u8 old[POKE_MAX_OPCODE_SIZE] = { tp[i].old, };
1456 int len = tp[i].len;
1457
1458 if (len - INT3_INSN_SIZE > 0) {
1459 memcpy(old + INT3_INSN_SIZE,
1460 text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
1461 len - INT3_INSN_SIZE);
1462 text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
1463 (const char *)tp[i].text + INT3_INSN_SIZE,
1464 len - INT3_INSN_SIZE);
1465 do_sync++;
1466 }
1467
1468 /*
1469 * Emit a perf event to record the text poke, primarily to
1470 * support Intel PT decoding which must walk the executable code
1471 * to reconstruct the trace. The flow up to here is:
1472 * - write INT3 byte
1473 * - IPI-SYNC
1474 * - write instruction tail
1475 * At this point the actual control flow will be through the
1476 * INT3 and handler and not hit the old or new instruction.
1477 * Intel PT outputs FUP/TIP packets for the INT3, so the flow
1478 * can still be decoded. Subsequently:
1479 * - emit RECORD_TEXT_POKE with the new instruction
1480 * - IPI-SYNC
1481 * - write first byte
1482 * - IPI-SYNC
1483 * So before the text poke event timestamp, the decoder will see
1484 * either the old instruction flow or FUP/TIP of INT3. After the
1485 * text poke event timestamp, the decoder will see either the
1486 * new instruction flow or FUP/TIP of INT3. Thus decoders can
1487 * use the timestamp as the point at which to modify the
1488 * executable code.
1489 * The old instruction is recorded so that the event can be
1490 * processed forwards or backwards.
1491 */
1492 perf_event_text_poke(text_poke_addr(&tp[i]), old, len,
1493 tp[i].text, len);
1494 }
1495
1496 if (do_sync) {
1497 /*
1498 * According to Intel, this core syncing is very likely
1499 * not necessary and we'd be safe even without it. But
1500 * better safe than sorry (plus there's not only Intel).
1501 */
1502 text_poke_sync();
1503 }
1504
1505 /*
1506 * Third step: replace the first byte (int3) by the first byte of
1507 * replacing opcode.
1508 */
1509 for (do_sync = 0, i = 0; i < nr_entries; i++) {
1510 if (tp[i].text[0] == INT3_INSN_OPCODE)
1511 continue;
1512
1513 text_poke(text_poke_addr(&tp[i]), tp[i].text, INT3_INSN_SIZE);
1514 do_sync++;
1515 }
1516
1517 if (do_sync)
1518 text_poke_sync();
1519
1520 /*
1521 * Remove and synchronize_rcu(), except we have a very primitive
1522 * refcount based completion.
1523 */
1524 WRITE_ONCE(bp_desc, NULL); /* RCU_INIT_POINTER */
1525 if (!atomic_dec_and_test(&desc.refs))
1526 atomic_cond_read_acquire(&desc.refs, !VAL);
1527 }
1528
1529 static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
1530 const void *opcode, size_t len, const void *emulate)
1531 {
1532 struct insn insn;
1533 int ret, i;
1534
1535 memcpy((void *)tp->text, opcode, len);
1536 if (!emulate)
1537 emulate = opcode;
1538
1539 ret = insn_decode_kernel(&insn, emulate);
1540 BUG_ON(ret < 0);
1541
1542 tp->rel_addr = addr - (void *)_stext;
1543 tp->len = len;
1544 tp->opcode = insn.opcode.bytes[0];
1545
1546 switch (tp->opcode) {
1547 case RET_INSN_OPCODE:
1548 case JMP32_INSN_OPCODE:
1549 case JMP8_INSN_OPCODE:
1550 /*
1551 * Control flow instructions without implied execution of the
1552 * next instruction can be padded with INT3.
1553 */
1554 for (i = insn.length; i < len; i++)
1555 BUG_ON(tp->text[i] != INT3_INSN_OPCODE);
1556 break;
1557
1558 default:
1559 BUG_ON(len != insn.length);
1560 }
1561
1562
1563 switch (tp->opcode) {
1564 case INT3_INSN_OPCODE:
1565 case RET_INSN_OPCODE:
1566 break;
1567
1568 case CALL_INSN_OPCODE:
1569 case JMP32_INSN_OPCODE:
1570 case JMP8_INSN_OPCODE:
1571 tp->disp = insn.immediate.value;
1572 break;
1573
1574 default: /* assume NOP */
1575 switch (len) {
1576 case 2: /* NOP2 -- emulate as JMP8+0 */
1577 BUG_ON(memcmp(emulate, ideal_nops[len], len));
1578 tp->opcode = JMP8_INSN_OPCODE;
1579 tp->disp = 0;
1580 break;
1581
1582 case 5: /* NOP5 -- emulate as JMP32+0 */
1583 BUG_ON(memcmp(emulate, ideal_nops[NOP_ATOMIC5], len));
1584 tp->opcode = JMP32_INSN_OPCODE;
1585 tp->disp = 0;
1586 break;
1587
1588 default: /* unknown instruction */
1589 BUG();
1590 }
1591 break;
1592 }
1593 }
1594
1595 /*
1596 * We hard rely on the tp_vec being ordered; ensure this is so by flushing
1597 * early if needed.
1598 */
1599 static bool tp_order_fail(void *addr)
1600 {
1601 struct text_poke_loc *tp;
1602
1603 if (!tp_vec_nr)
1604 return false;
1605
1606 if (!addr) /* force */
1607 return true;
1608
1609 tp = &tp_vec[tp_vec_nr - 1];
1610 if ((unsigned long)text_poke_addr(tp) > (unsigned long)addr)
1611 return true;
1612
1613 return false;
1614 }
1615
1616 static void text_poke_flush(void *addr)
1617 {
1618 if (tp_vec_nr == TP_VEC_MAX || tp_order_fail(addr)) {
1619 text_poke_bp_batch(tp_vec, tp_vec_nr);
1620 tp_vec_nr = 0;
1621 }
1622 }
1623
1624 void text_poke_finish(void)
1625 {
1626 text_poke_flush(NULL);
1627 }
1628
1629 void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate)
1630 {
1631 struct text_poke_loc *tp;
1632
1633 if (unlikely(system_state == SYSTEM_BOOTING)) {
1634 text_poke_early(addr, opcode, len);
1635 return;
1636 }
1637
1638 text_poke_flush(addr);
1639
1640 tp = &tp_vec[tp_vec_nr++];
1641 text_poke_loc_init(tp, addr, opcode, len, emulate);
1642 }
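/*
 * Editor's note: a usage sketch with hypothetical call sites; batch users
 * queue several patches and flush them with a single INT3 round-trip. Sites
 * must be queued in ascending address order or the vector is flushed early
 * (see tp_order_fail() above).
 */
#if 0	/* illustrative only */
	text_poke_queue(site1, insn1, len1, NULL);
	text_poke_queue(site2, insn2, len2, NULL);
	text_poke_finish();			/* one batched INT3 cycle */
#endif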
1643
1644 /**
1645 * text_poke_bp() -- update instructions on live kernel on SMP
1646 * @addr: address to patch
1647 * @opcode: opcode of new instruction
1648 * @len: length to copy
1649 * @emulate: instruction to emulate when the temporary breakpoint is hit; NULL means emulate @opcode
1650 *
1651 * Update a single instruction with the vector in the stack, avoiding
1652 * dynamically allocated memory. This function should be used when it is
1653 * not possible to allocate memory.
1654 */
1655 void __ref text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate)
1656 {
1657 struct text_poke_loc tp;
1658
1659 if (unlikely(system_state == SYSTEM_BOOTING)) {
1660 text_poke_early(addr, opcode, len);
1661 return;
1662 }
1663
1664 text_poke_loc_init(&tp, addr, opcode, len, emulate);
1665 text_poke_bp_batch(&tp, 1);
1666 }
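/*
 * Editor's note: a usage sketch with hypothetical names; replacing a 5-byte
 * site with a CALL while other CPUs may be executing it. Any CPU that hits
 * the transient INT3 has the CALL emulated by poke_int3_handler().
 */
#if 0	/* illustrative only */
	u8 call_insn[5] = { CALL_INSN_OPCODE, };

	*(s32 *)&call_insn[1] = (s32)((long)target - ((long)site + 5));
	text_poke_bp(site, call_insn, sizeof(call_insn), NULL);
#endif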
1667