1 // SPDX-License-Identifier: GPL-2.0-only
2 #define pr_fmt(fmt) "SMP alternatives: " fmt
3 
4 #include <linux/module.h>
5 #include <linux/sched.h>
6 #include <linux/perf_event.h>
7 #include <linux/mutex.h>
8 #include <linux/list.h>
9 #include <linux/stringify.h>
10 #include <linux/highmem.h>
11 #include <linux/mm.h>
12 #include <linux/vmalloc.h>
13 #include <linux/memory.h>
14 #include <linux/stop_machine.h>
15 #include <linux/slab.h>
16 #include <linux/kdebug.h>
17 #include <linux/kprobes.h>
18 #include <linux/mmu_context.h>
19 #include <linux/bsearch.h>
20 #include <linux/sync_core.h>
21 #include <asm/text-patching.h>
22 #include <asm/alternative.h>
23 #include <asm/sections.h>
24 #include <asm/mce.h>
25 #include <asm/nmi.h>
26 #include <asm/cacheflush.h>
27 #include <asm/tlbflush.h>
28 #include <asm/insn.h>
29 #include <asm/io.h>
30 #include <asm/fixmap.h>
31 #include <asm/asm-prototypes.h>
32 
33 int __read_mostly alternatives_patched;
34 
35 EXPORT_SYMBOL_GPL(alternatives_patched);
36 
37 #define MAX_PATCH_LEN (255-1)
38 
39 static int __initdata_or_module debug_alternative;
40 
41 static int __init debug_alt(char *str)
42 {
43 	debug_alternative = 1;
44 	return 1;
45 }
46 __setup("debug-alternative", debug_alt);
47 
48 static int noreplace_smp;
49 
50 static int __init setup_noreplace_smp(char *str)
51 {
52 	noreplace_smp = 1;
53 	return 1;
54 }
55 __setup("noreplace-smp", setup_noreplace_smp);
56 
57 #define DPRINTK(fmt, args...)						\
58 do {									\
59 	if (debug_alternative)						\
60 		printk(KERN_DEBUG pr_fmt(fmt) "\n", ##args);		\
61 } while (0)
62 
63 #define DUMP_BYTES(buf, len, fmt, args...)				\
64 do {									\
65 	if (unlikely(debug_alternative)) {				\
66 		int j;							\
67 									\
68 		if (!(len))						\
69 			break;						\
70 									\
71 		printk(KERN_DEBUG pr_fmt(fmt), ##args);			\
72 		for (j = 0; j < (len) - 1; j++)				\
73 			printk(KERN_CONT "%02hhx ", buf[j]);		\
74 		printk(KERN_CONT "%02hhx\n", buf[j]);			\
75 	}								\
76 } while (0)
77 
78 /*
79  * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
80  * that correspond to that nop. Getting from one nop to the next, we
81  * add to the array the offset that is equal to the sum of all sizes of
82  * nops preceding the one we are after.
83  *
84  * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
85  * nice symmetry of sizes of the previous nops.
86  */
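/*
 * Worked example of the indexing scheme (illustrative, not part of the
 * original source): intel_nops[4] is intelnops + 1 + 2 + 3, i.e. it skips
 * the 1-, 2- and 3-byte NOPs and lands on the first byte of GENERIC_NOP4.
 * The extra slot at index ASM_NOP_MAX + 1 points at GENERIC_NOP5_ATOMIC.
 */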
87 #if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
88 static const unsigned char intelnops[] =
89 {
90 	GENERIC_NOP1,
91 	GENERIC_NOP2,
92 	GENERIC_NOP3,
93 	GENERIC_NOP4,
94 	GENERIC_NOP5,
95 	GENERIC_NOP6,
96 	GENERIC_NOP7,
97 	GENERIC_NOP8,
98 	GENERIC_NOP5_ATOMIC
99 };
100 static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
101 {
102 	NULL,
103 	intelnops,
104 	intelnops + 1,
105 	intelnops + 1 + 2,
106 	intelnops + 1 + 2 + 3,
107 	intelnops + 1 + 2 + 3 + 4,
108 	intelnops + 1 + 2 + 3 + 4 + 5,
109 	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
110 	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
111 	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
112 };
113 #endif
114 
115 #ifdef K8_NOP1
116 static const unsigned char k8nops[] =
117 {
118 	K8_NOP1,
119 	K8_NOP2,
120 	K8_NOP3,
121 	K8_NOP4,
122 	K8_NOP5,
123 	K8_NOP6,
124 	K8_NOP7,
125 	K8_NOP8,
126 	K8_NOP5_ATOMIC
127 };
128 static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
129 {
130 	NULL,
131 	k8nops,
132 	k8nops + 1,
133 	k8nops + 1 + 2,
134 	k8nops + 1 + 2 + 3,
135 	k8nops + 1 + 2 + 3 + 4,
136 	k8nops + 1 + 2 + 3 + 4 + 5,
137 	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
138 	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
139 	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
140 };
141 #endif
142 
143 #if defined(K7_NOP1) && !defined(CONFIG_X86_64)
144 static const unsigned char k7nops[] =
145 {
146 	K7_NOP1,
147 	K7_NOP2,
148 	K7_NOP3,
149 	K7_NOP4,
150 	K7_NOP5,
151 	K7_NOP6,
152 	K7_NOP7,
153 	K7_NOP8,
154 	K7_NOP5_ATOMIC
155 };
156 static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
157 {
158 	NULL,
159 	k7nops,
160 	k7nops + 1,
161 	k7nops + 1 + 2,
162 	k7nops + 1 + 2 + 3,
163 	k7nops + 1 + 2 + 3 + 4,
164 	k7nops + 1 + 2 + 3 + 4 + 5,
165 	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
166 	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
167 	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
168 };
169 #endif
170 
171 #ifdef P6_NOP1
172 static const unsigned char p6nops[] =
173 {
174 	P6_NOP1,
175 	P6_NOP2,
176 	P6_NOP3,
177 	P6_NOP4,
178 	P6_NOP5,
179 	P6_NOP6,
180 	P6_NOP7,
181 	P6_NOP8,
182 	P6_NOP5_ATOMIC
183 };
184 static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
185 {
186 	NULL,
187 	p6nops,
188 	p6nops + 1,
189 	p6nops + 1 + 2,
190 	p6nops + 1 + 2 + 3,
191 	p6nops + 1 + 2 + 3 + 4,
192 	p6nops + 1 + 2 + 3 + 4 + 5,
193 	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
194 	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
195 	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
196 };
197 #endif
198 
199 /* Initialize these to a safe default */
200 #ifdef CONFIG_X86_64
201 const unsigned char * const *ideal_nops = p6_nops;
202 #else
203 const unsigned char * const *ideal_nops = intel_nops;
204 #endif
205 
206 void __init arch_init_ideal_nops(void)
207 {
208 	switch (boot_cpu_data.x86_vendor) {
209 	case X86_VENDOR_INTEL:
210 		/*
211 		 * Due to a decoder implementation quirk, some
212 		 * specific Intel CPUs actually perform better with
213 		 * the "k8_nops" than with the SDM-recommended NOPs.
214 		 */
215 		if (boot_cpu_data.x86 == 6 &&
216 		    boot_cpu_data.x86_model >= 0x0f &&
217 		    boot_cpu_data.x86_model != 0x1c &&
218 		    boot_cpu_data.x86_model != 0x26 &&
219 		    boot_cpu_data.x86_model != 0x27 &&
220 		    boot_cpu_data.x86_model < 0x30) {
221 			ideal_nops = k8_nops;
222 		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
223 			   ideal_nops = p6_nops;
224 		} else {
225 #ifdef CONFIG_X86_64
226 			ideal_nops = k8_nops;
227 #else
228 			ideal_nops = intel_nops;
229 #endif
230 		}
231 		break;
232 
233 	case X86_VENDOR_HYGON:
234 		ideal_nops = p6_nops;
235 		return;
236 
237 	case X86_VENDOR_AMD:
238 		if (boot_cpu_data.x86 > 0xf) {
239 			ideal_nops = p6_nops;
240 			return;
241 		}
242 
243 		fallthrough;
244 
245 	default:
246 #ifdef CONFIG_X86_64
247 		ideal_nops = k8_nops;
248 #else
249 		if (boot_cpu_has(X86_FEATURE_K8))
250 			ideal_nops = k8_nops;
251 		else if (boot_cpu_has(X86_FEATURE_K7))
252 			ideal_nops = k7_nops;
253 		else
254 			ideal_nops = intel_nops;
255 #endif
256 	}
257 }
258 
259 /* Use this to add nops to a buffer, then text_poke the whole buffer. */
260 static void __init_or_module add_nops(void *insns, unsigned int len)
261 {
262 	while (len > 0) {
263 		unsigned int noplen = len;
264 		if (noplen > ASM_NOP_MAX)
265 			noplen = ASM_NOP_MAX;
266 		memcpy(insns, ideal_nops[noplen], noplen);
267 		insns += noplen;
268 		len -= noplen;
269 	}
270 }
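/*
 * Illustrative example (not part of the original source): padding a
 * 12-byte hole with add_nops() copies ideal_nops[8] (one 8-byte NOP) and
 * then ideal_nops[4] (one 4-byte NOP), assuming ASM_NOP_MAX == 8.
 */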
271 
272 extern s32 __retpoline_sites[], __retpoline_sites_end[];
273 extern s32 __return_sites[], __return_sites_end[];
274 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
275 extern s32 __smp_locks[], __smp_locks_end[];
276 void text_poke_early(void *addr, const void *opcode, size_t len);
277 
278 /*
279  * Are we looking at a near JMP with a 1- or 4-byte displacement?
280  */
281 static inline bool is_jmp(const u8 opcode)
282 {
283 	return opcode == 0xeb || opcode == 0xe9;
284 }
285 
286 static void __init_or_module
287 recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insn_buff)
288 {
289 	u8 *next_rip, *tgt_rip;
290 	s32 n_dspl, o_dspl;
291 	int repl_len;
292 
293 	if (a->replacementlen != 5)
294 		return;
295 
296 	o_dspl = *(s32 *)(insn_buff + 1);
297 
298 	/* next_rip of the replacement JMP */
299 	next_rip = repl_insn + a->replacementlen;
300 	/* target rip of the replacement JMP */
301 	tgt_rip  = next_rip + o_dspl;
302 	n_dspl = tgt_rip - orig_insn;
303 
304 	DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);
305 
306 	if (tgt_rip - orig_insn >= 0) {
307 		if (n_dspl - 2 <= 127)
308 			goto two_byte_jmp;
309 		else
310 			goto five_byte_jmp;
311 	/* negative offset */
312 	} else {
313 		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
314 			goto two_byte_jmp;
315 		else
316 			goto five_byte_jmp;
317 	}
318 
319 two_byte_jmp:
320 	n_dspl -= 2;
321 
322 	insn_buff[0] = 0xeb;
323 	insn_buff[1] = (s8)n_dspl;
324 	add_nops(insn_buff + 2, 3);
325 
326 	repl_len = 2;
327 	goto done;
328 
329 five_byte_jmp:
330 	n_dspl -= 5;
331 
332 	insn_buff[0] = 0xe9;
333 	*(s32 *)&insn_buff[1] = n_dspl;
334 
335 	repl_len = 5;
336 
337 done:
338 
339 	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
340 		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
341 }
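/*
 * Illustrative example (not part of the original source): if the
 * replacement "e9 <rel32>" resolves to a target 10 bytes past the start
 * of the original site, n_dspl is 10 and the JMP is re-encoded as the
 * 2-byte form "eb 08" followed by a 3-byte NOP.
 */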
342 
343 /*
344  * optimize_nops_range() - Optimize a sequence of single byte NOPs (0x90)
345  *
346  * @instr: instruction byte stream
347  * @instrlen: length of the above
348  * @off: offset within @instr where the first NOP has been detected
349  *
350  * Return: number of NOPs found (and replaced).
351  */
352 static __always_inline int optimize_nops_range(u8 *instr, u8 instrlen, int off)
353 {
354 	unsigned long flags;
355 	int i = off, nnops;
356 
357 	while (i < instrlen) {
358 		if (instr[i] != 0x90)
359 			break;
360 
361 		i++;
362 	}
363 
364 	nnops = i - off;
365 
366 	if (nnops <= 1)
367 		return nnops;
368 
369 	local_irq_save(flags);
370 	add_nops(instr + off, nnops);
371 	local_irq_restore(flags);
372 
373 	DUMP_BYTES(instr, instrlen, "%px: [%d:%d) optimized NOPs: ", instr, off, i);
374 
375 	return nnops;
376 }
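/*
 * Illustrative example (not part of the original source): three
 * consecutive 0x90 bytes at @off make nnops == 3, and add_nops() rewrites
 * them with the single 3-byte NOP from ideal_nops[3].
 */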
377 
378 /*
379  * "noinline" to cause control flow change and thus invalidate I$ and
380  * cause refetch after modification.
381  */
382 static void __init_or_module noinline optimize_nops(u8 *instr, size_t len)
383 {
384 	struct insn insn;
385 	int i = 0;
386 
387 	/*
388 	 * Jump over the non-NOP insns and optimize single-byte NOPs into bigger
389 	 * ones.
390 	 */
391 	for (;;) {
392 		if (insn_decode_kernel(&insn, &instr[i]))
393 			return;
394 
395 		/*
396 		 * See if this and any potentially following NOPs can be
397 		 * optimized.
398 		 */
399 		if (insn.length == 1 && insn.opcode.bytes[0] == 0x90)
400 			i += optimize_nops_range(instr, len, i);
401 		else
402 			i += insn.length;
403 
404 		if (i >= len)
405 			return;
406 	}
407 }
408 
409 /*
410  * Replace instructions with better alternatives for this CPU type. This runs
411  * before SMP is initialized to avoid SMP problems with self modifying code.
412  * This implies that asymmetric systems where APs have fewer capabilities than
413  * the boot processor are not handled. Tough. Make sure you disable such
414  * features by hand.
415  *
416  * Marked "noinline" to cause control flow change and thus insn cache
417  * to refetch changed I$ lines.
418  */
419 void __init_or_module noinline apply_alternatives(struct alt_instr *start,
420 						  struct alt_instr *end)
421 {
422 	struct alt_instr *a;
423 	u8 *instr, *replacement;
424 	u8 insn_buff[MAX_PATCH_LEN];
425 
426 	DPRINTK("alt table %px, -> %px", start, end);
427 	/*
428 	 * The scan order should be from start to end. A later scanned
429 	 * alternative code can overwrite previously scanned alternative code.
430 	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
431 	 * patch code.
432 	 *
433 	 * So be careful if you want to change the scan order to any other
434 	 * order.
435 	 */
436 	for (a = start; a < end; a++) {
437 		int insn_buff_sz = 0;
438 		/* Mask away "NOT" flag bit for feature to test. */
439 		u16 feature = a->cpuid & ~ALTINSTR_FLAG_INV;
440 
441 		instr = (u8 *)&a->instr_offset + a->instr_offset;
442 		replacement = (u8 *)&a->repl_offset + a->repl_offset;
443 		BUG_ON(a->instrlen > sizeof(insn_buff));
444 		BUG_ON(feature >= (NCAPINTS + NBUGINTS) * 32);
445 
446 		/*
447 		 * Patch if either:
448 		 * - feature is present
449 		 * - feature not present but ALTINSTR_FLAG_INV is set to mean,
450 		 *   patch if feature is *NOT* present.
451 		 */
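		/*
		 * Truth table for the test below (illustrative, not part of
		 * the original source):
		 *
		 *   feature present	ALTINSTR_FLAG_INV	action
		 *   yes		clear			patch
		 *   no			clear			skip
		 *   yes		set			skip
		 *   no			set			patch
		 */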
452 		if (!boot_cpu_has(feature) == !(a->cpuid & ALTINSTR_FLAG_INV))
453 			goto next;
454 
455 		DPRINTK("feat: %s%d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d)",
456 			(a->cpuid & ALTINSTR_FLAG_INV) ? "!" : "",
457 			feature >> 5,
458 			feature & 0x1f,
459 			instr, instr, a->instrlen,
460 			replacement, a->replacementlen);
461 
462 		DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
463 		DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
464 
465 		memcpy(insn_buff, replacement, a->replacementlen);
466 		insn_buff_sz = a->replacementlen;
467 
468 		/*
469 		 * 0xe8 is a relative CALL; fix the offset.
470 		 *
471 		 * Instruction length is checked before the opcode to avoid
472 		 * accessing uninitialized bytes for zero-length replacements.
473 		 */
474 		if (a->replacementlen == 5 && *insn_buff == 0xe8) {
475 			*(s32 *)(insn_buff + 1) += replacement - instr;
476 			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
477 				*(s32 *)(insn_buff + 1),
478 				(unsigned long)instr + *(s32 *)(insn_buff + 1) + 5);
479 		}
480 
481 		if (a->replacementlen && is_jmp(replacement[0]))
482 			recompute_jump(a, instr, replacement, insn_buff);
483 
484 		for (; insn_buff_sz < a->instrlen; insn_buff_sz++)
485 			insn_buff[insn_buff_sz] = 0x90;
486 
487 		DUMP_BYTES(insn_buff, insn_buff_sz, "%px: final_insn: ", instr);
488 
489 		text_poke_early(instr, insn_buff, insn_buff_sz);
490 
491 next:
492 		optimize_nops(instr, a->instrlen);
493 	}
494 }
495 
496 #if defined(CONFIG_RETPOLINE) && defined(CONFIG_STACK_VALIDATION)
497 
498 /*
499  * CALL/JMP *%\reg
500  */
501 static int emit_indirect(int op, int reg, u8 *bytes)
502 {
503 	int i = 0;
504 	u8 modrm;
505 
506 	switch (op) {
507 	case CALL_INSN_OPCODE:
508 		modrm = 0x10; /* Reg = 2; CALL r/m */
509 		break;
510 
511 	case JMP32_INSN_OPCODE:
512 		modrm = 0x20; /* Reg = 4; JMP r/m */
513 		break;
514 
515 	default:
516 		WARN_ON_ONCE(1);
517 		return -1;
518 	}
519 
520 	if (reg >= 8) {
521 		bytes[i++] = 0x41; /* REX.B prefix */
522 		reg -= 8;
523 	}
524 
525 	modrm |= 0xc0; /* Mod = 3 */
526 	modrm += reg;
527 
528 	bytes[i++] = 0xff; /* opcode */
529 	bytes[i++] = modrm;
530 
531 	return i;
532 }
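/*
 * Illustrative example (not part of the original source): emitting a CALL
 * via %r11 produces "41 ff d3" -- a REX.B prefix because r11 >= 8, the
 * 0xff opcode, and ModRM 0xd3 (0x10 | 0xc0 | 3).
 */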
533 
534 /*
535  * Rewrite the compiler generated retpoline thunk calls.
536  *
537  * For spectre_v2=off (!X86_FEATURE_RETPOLINE), rewrite them into immediate
538  * indirect instructions, avoiding the extra indirection.
539  *
540  * For example, convert:
541  *
542  *   CALL __x86_indirect_thunk_\reg
543  *
544  * into:
545  *
546  *   CALL *%\reg
547  *
548  * It also tries to inline spectre_v2=retpoline,amd when size permits.
549  */
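/*
 * Byte-level sketch (illustrative, not part of the original source): with
 * retpolines disabled, the 5-byte "e8 <rel32>" call to the %rax thunk is
 * rewritten in place as "ff d0" (CALL *%rax) plus NOP padding, keeping the
 * patched site at its original length.
 */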
550 static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
551 {
552 	retpoline_thunk_t *target;
553 	int reg, ret, i = 0;
554 	u8 op, cc;
555 
556 	target = addr + insn->length + insn->immediate.value;
557 	reg = target - __x86_indirect_thunk_array;
558 
559 	if (WARN_ON_ONCE(reg & ~0xf))
560 		return -1;
561 
562 	/* If anyone ever does: CALL/JMP *%rsp, we're in deep trouble. */
563 	BUG_ON(reg == 4);
564 
565 	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE) &&
566 	    !cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE))
567 		return -1;
568 
569 	op = insn->opcode.bytes[0];
570 
571 	/*
572 	 * Convert:
573 	 *
574 	 *   Jcc.d32 __x86_indirect_thunk_\reg
575 	 *
576 	 * into:
577 	 *
578 	 *   Jncc.d8 1f
579 	 *   [ LFENCE ]
580 	 *   JMP *%\reg
581 	 *   [ NOP ]
582 	 * 1:
583 	 */
584 	/* Jcc.d32 second opcode byte is in the range: 0x80-0x8f */
585 	if (op == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80) {
586 		cc = insn->opcode.bytes[1] & 0xf;
587 		cc ^= 1; /* invert condition */
588 
589 		bytes[i++] = 0x70 + cc;        /* Jcc.d8 */
590 		bytes[i++] = insn->length - 2; /* sizeof(Jcc.d8) == 2 */
591 
592 		/* Continue as if: JMP.d32 __x86_indirect_thunk_\reg */
593 		op = JMP32_INSN_OPCODE;
594 	}
595 
596 	/*
597 	 * For RETPOLINE_AMD: prepend the indirect CALL/JMP with an LFENCE.
598 	 */
599 	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
600 		bytes[i++] = 0x0f;
601 		bytes[i++] = 0xae;
602 		bytes[i++] = 0xe8; /* LFENCE */
603 	}
604 
605 	ret = emit_indirect(op, reg, bytes + i);
606 	if (ret < 0)
607 		return ret;
608 	i += ret;
609 
610 	for (; i < insn->length;)
611 		bytes[i++] = 0x90;
612 
613 	return i;
614 }
615 
616 /*
617  * Generated by 'objtool --retpoline'.
618  */
619 void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
620 {
621 	s32 *s;
622 
623 	for (s = start; s < end; s++) {
624 		void *addr = (void *)s + *s;
625 		struct insn insn;
626 		int len, ret;
627 		u8 bytes[16];
628 		u8 op1, op2;
629 
630 		ret = insn_decode_kernel(&insn, addr);
631 		if (WARN_ON_ONCE(ret < 0))
632 			continue;
633 
634 		op1 = insn.opcode.bytes[0];
635 		op2 = insn.opcode.bytes[1];
636 
637 		switch (op1) {
638 		case CALL_INSN_OPCODE:
639 		case JMP32_INSN_OPCODE:
640 			break;
641 
642 		case 0x0f: /* escape */
643 			if (op2 >= 0x80 && op2 <= 0x8f)
644 				break;
645 			fallthrough;
646 		default:
647 			WARN_ON_ONCE(1);
648 			continue;
649 		}
650 
651 		DPRINTK("retpoline at: %pS (%px) len: %d to: %pS",
652 			addr, addr, insn.length,
653 			addr + insn.length + insn.immediate.value);
654 
655 		len = patch_retpoline(addr, &insn, bytes);
656 		if (len == insn.length) {
657 			optimize_nops(bytes, len);
658 			DUMP_BYTES(((u8*)addr),  len, "%px: orig: ", addr);
659 			DUMP_BYTES(((u8*)bytes), len, "%px: repl: ", addr);
660 			text_poke_early(addr, bytes, len);
661 		}
662 	}
663 }
664 
665 #ifdef CONFIG_RETHUNK
666 /*
667  * Rewrite the compiler generated return thunk tail-calls.
668  *
669  * For example, convert:
670  *
671  *   JMP __x86_return_thunk
672  *
673  * into:
674  *
675  *   RET
676  */
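/*
 * Byte-level sketch (illustrative, not part of the original source): the
 * 5-byte "e9 <rel32>" tail-call to __x86_return_thunk becomes "c3" (RET)
 * followed by four INT3 (0xcc) bytes, as emitted by patch_return() below.
 */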
677 static int patch_return(void *addr, struct insn *insn, u8 *bytes)
678 {
679 	int i = 0;
680 
681 	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
682 		return -1;
683 
684 	bytes[i++] = RET_INSN_OPCODE;
685 
686 	for (; i < insn->length;)
687 		bytes[i++] = INT3_INSN_OPCODE;
688 
689 	return i;
690 }
691 
692 void __init_or_module noinline apply_returns(s32 *start, s32 *end)
693 {
694 	s32 *s;
695 
696 	for (s = start; s < end; s++) {
697 		void *dest = NULL, *addr = (void *)s + *s;
698 		struct insn insn;
699 		int len, ret;
700 		u8 bytes[16];
701 		u8 op;
702 
703 		ret = insn_decode_kernel(&insn, addr);
704 		if (WARN_ON_ONCE(ret < 0))
705 			continue;
706 
707 		op = insn.opcode.bytes[0];
708 		if (op == JMP32_INSN_OPCODE)
709 			dest = addr + insn.length + insn.immediate.value;
710 
711 		if (__static_call_fixup(addr, op, dest) ||
712 		    WARN_ONCE(dest != &__x86_return_thunk,
713 			      "missing return thunk: %pS-%pS: %*ph",
714 			      addr, dest, 5, addr))
715 			continue;
716 
717 		DPRINTK("return thunk at: %pS (%px) len: %d to: %pS",
718 			addr, addr, insn.length,
719 			addr + insn.length + insn.immediate.value);
720 
721 		len = patch_return(addr, &insn, bytes);
722 		if (len == insn.length) {
723 			DUMP_BYTES(((u8*)addr),  len, "%px: orig: ", addr);
724 			DUMP_BYTES(((u8*)bytes), len, "%px: repl: ", addr);
725 			text_poke_early(addr, bytes, len);
726 		}
727 	}
728 }
729 #else
730 void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
731 #endif /* CONFIG_RETHUNK */
732 
733 #else /* !RETPOLINES || !CONFIG_STACK_VALIDATION */
734 
735 void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) { }
736 void __init_or_module noinline apply_returns(s32 *start, s32 *end) { }
737 
738 #endif /* CONFIG_RETPOLINE && CONFIG_STACK_VALIDATION */
739 
740 #ifdef CONFIG_SMP
741 static void alternatives_smp_lock(const s32 *start, const s32 *end,
742 				  u8 *text, u8 *text_end)
743 {
744 	const s32 *poff;
745 
746 	for (poff = start; poff < end; poff++) {
747 		u8 *ptr = (u8 *)poff + *poff;
748 
749 		if (!*poff || ptr < text || ptr >= text_end)
750 			continue;
751 		/* turn DS segment override prefix into lock prefix */
752 		if (*ptr == 0x3e)
753 			text_poke(ptr, ((unsigned char []){0xf0}), 1);
754 	}
755 }
756 
757 static void alternatives_smp_unlock(const s32 *start, const s32 *end,
758 				    u8 *text, u8 *text_end)
759 {
760 	const s32 *poff;
761 
762 	for (poff = start; poff < end; poff++) {
763 		u8 *ptr = (u8 *)poff + *poff;
764 
765 		if (!*poff || ptr < text || ptr >= text_end)
766 			continue;
767 		/* turn lock prefix into DS segment override prefix */
768 		if (*ptr == 0xf0)
769 			text_poke(ptr, ((unsigned char []){0x3E}), 1);
770 	}
771 }
772 
773 struct smp_alt_module {
774 	/* what is this ??? */
775 	struct module	*mod;
776 	char		*name;
777 
778 	/* ptrs to lock prefixes */
779 	const s32	*locks;
780 	const s32	*locks_end;
781 
782 	/* .text segment, needed to avoid patching init code ;) */
783 	u8		*text;
784 	u8		*text_end;
785 
786 	struct list_head next;
787 };
788 static LIST_HEAD(smp_alt_modules);
789 static bool uniproc_patched = false;	/* protected by text_mutex */
790 
791 void __init_or_module alternatives_smp_module_add(struct module *mod,
792 						  char *name,
793 						  void *locks, void *locks_end,
794 						  void *text,  void *text_end)
795 {
796 	struct smp_alt_module *smp;
797 
798 	mutex_lock(&text_mutex);
799 	if (!uniproc_patched)
800 		goto unlock;
801 
802 	if (num_possible_cpus() == 1)
803 		/* Don't bother remembering, we'll never have to undo it. */
804 		goto smp_unlock;
805 
806 	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
807 	if (NULL == smp)
808 		/* we'll run the (safe but slow) SMP code then ... */
809 		goto unlock;
810 
811 	smp->mod	= mod;
812 	smp->name	= name;
813 	smp->locks	= locks;
814 	smp->locks_end	= locks_end;
815 	smp->text	= text;
816 	smp->text_end	= text_end;
817 	DPRINTK("locks %p -> %p, text %p -> %p, name %s",
818 		smp->locks, smp->locks_end,
819 		smp->text, smp->text_end, smp->name);
820 
821 	list_add_tail(&smp->next, &smp_alt_modules);
822 smp_unlock:
823 	alternatives_smp_unlock(locks, locks_end, text, text_end);
824 unlock:
825 	mutex_unlock(&text_mutex);
826 }
827 
828 void __init_or_module alternatives_smp_module_del(struct module *mod)
829 {
830 	struct smp_alt_module *item;
831 
832 	mutex_lock(&text_mutex);
833 	list_for_each_entry(item, &smp_alt_modules, next) {
834 		if (mod != item->mod)
835 			continue;
836 		list_del(&item->next);
837 		kfree(item);
838 		break;
839 	}
840 	mutex_unlock(&text_mutex);
841 }
842 
843 void alternatives_enable_smp(void)
844 {
845 	struct smp_alt_module *mod;
846 
847 	/* Why bother if there are no other CPUs? */
848 	BUG_ON(num_possible_cpus() == 1);
849 
850 	mutex_lock(&text_mutex);
851 
852 	if (uniproc_patched) {
853 		pr_info("switching to SMP code\n");
854 		BUG_ON(num_online_cpus() != 1);
855 		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
856 		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
857 		list_for_each_entry(mod, &smp_alt_modules, next)
858 			alternatives_smp_lock(mod->locks, mod->locks_end,
859 					      mod->text, mod->text_end);
860 		uniproc_patched = false;
861 	}
862 	mutex_unlock(&text_mutex);
863 }
864 
865 /*
866  * Return 1 if the address range is reserved for SMP-alternatives.
867  * Must hold text_mutex.
868  */
869 int alternatives_text_reserved(void *start, void *end)
870 {
871 	struct smp_alt_module *mod;
872 	const s32 *poff;
873 	u8 *text_start = start;
874 	u8 *text_end = end;
875 
876 	lockdep_assert_held(&text_mutex);
877 
878 	list_for_each_entry(mod, &smp_alt_modules, next) {
879 		if (mod->text > text_end || mod->text_end < text_start)
880 			continue;
881 		for (poff = mod->locks; poff < mod->locks_end; poff++) {
882 			const u8 *ptr = (const u8 *)poff + *poff;
883 
884 			if (text_start <= ptr && text_end > ptr)
885 				return 1;
886 		}
887 	}
888 
889 	return 0;
890 }
891 #endif /* CONFIG_SMP */
892 
893 #ifdef CONFIG_PARAVIRT
894 void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
895 				     struct paravirt_patch_site *end)
896 {
897 	struct paravirt_patch_site *p;
898 	char insn_buff[MAX_PATCH_LEN];
899 
900 	for (p = start; p < end; p++) {
901 		unsigned int used;
902 
903 		BUG_ON(p->len > MAX_PATCH_LEN);
904 		/* prep the buffer with the original instructions */
905 		memcpy(insn_buff, p->instr, p->len);
906 		used = pv_ops.init.patch(p->type, insn_buff, (unsigned long)p->instr, p->len);
907 
908 		BUG_ON(used > p->len);
909 
910 		/* Pad the rest with nops */
911 		add_nops(insn_buff + used, p->len - used);
912 		text_poke_early(p->instr, insn_buff, p->len);
913 	}
914 }
915 extern struct paravirt_patch_site __start_parainstructions[],
916 	__stop_parainstructions[];
917 #endif	/* CONFIG_PARAVIRT */
918 
919 /*
920  * Self-test for the INT3 based CALL emulation code.
921  *
922  * This exercises int3_emulate_call() to make sure INT3 pt_regs are set up
923  * properly and that there is a stack gap between the INT3 frame and the
924  * previous context. Without this gap doing a virtual PUSH on the interrupted
925  * stack would corrupt the INT3 IRET frame.
926  *
927  * See entry_{32,64}.S for more details.
928  */
929 
930 /*
931  * We define the int3_magic() function in assembly to control the calling
932  * convention such that we can 'call' it from assembly.
933  */
934 
935 extern void int3_magic(unsigned int *ptr); /* defined in asm */
936 
937 asm (
938 "	.pushsection	.init.text, \"ax\", @progbits\n"
939 "	.type		int3_magic, @function\n"
940 "int3_magic:\n"
941 "	movl	$1, (%" _ASM_ARG1 ")\n"
942 	ASM_RET
943 "	.size		int3_magic, .-int3_magic\n"
944 "	.popsection\n"
945 );
946 
947 extern __initdata unsigned long int3_selftest_ip; /* defined in asm below */
948 
949 static int __init
950 int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
951 {
952 	struct die_args *args = data;
953 	struct pt_regs *regs = args->regs;
954 
955 	if (!regs || user_mode(regs))
956 		return NOTIFY_DONE;
957 
958 	if (val != DIE_INT3)
959 		return NOTIFY_DONE;
960 
961 	if (regs->ip - INT3_INSN_SIZE != int3_selftest_ip)
962 		return NOTIFY_DONE;
963 
964 	int3_emulate_call(regs, (unsigned long)&int3_magic);
965 	return NOTIFY_STOP;
966 }
967 
968 static void __init int3_selftest(void)
969 {
970 	static __initdata struct notifier_block int3_exception_nb = {
971 		.notifier_call	= int3_exception_notify,
972 		.priority	= INT_MAX-1, /* last */
973 	};
974 	unsigned int val = 0;
975 
976 	BUG_ON(register_die_notifier(&int3_exception_nb));
977 
978 	/*
979 	 * Basically: int3_magic(&val); but really complicated :-)
980 	 *
981 	 * Stick the address of the INT3 instruction into int3_selftest_ip,
982 	 * then trigger the INT3, padded with NOPs to match a CALL instruction
983 	 * length.
984 	 */
985 	asm volatile ("1: int3; nop; nop; nop; nop\n\t"
986 		      ".pushsection .init.data,\"aw\"\n\t"
987 		      ".align " __ASM_SEL(4, 8) "\n\t"
988 		      ".type int3_selftest_ip, @object\n\t"
989 		      ".size int3_selftest_ip, " __ASM_SEL(4, 8) "\n\t"
990 		      "int3_selftest_ip:\n\t"
991 		      __ASM_SEL(.long, .quad) " 1b\n\t"
992 		      ".popsection\n\t"
993 		      : ASM_CALL_CONSTRAINT
994 		      : __ASM_SEL_RAW(a, D) (&val)
995 		      : "memory");
996 
997 	BUG_ON(val != 1);
998 
999 	unregister_die_notifier(&int3_exception_nb);
1000 }
1001 
1002 void __init alternative_instructions(void)
1003 {
1004 	int3_selftest();
1005 
1006 	/*
1007 	 * The patching is not fully atomic, so try to avoid local
1008 	 * interruptions that might execute the code being patched.
1009 	 * Other CPUs are not running.
1010 	 */
1011 	stop_nmi();
1012 
1013 	/*
1014 	 * Don't stop machine check exceptions while patching.
1015 	 * MCEs only happen when something got corrupted and in this
1016 	 * case we must do something about the corruption.
1017 	 * Ignoring it is worse than an unlikely patching race.
1018 	 * Also machine checks tend to be broadcast and if one CPU
1019 	 * goes into machine check the others follow quickly, so we don't
1020 	 * expect a machine check to cause undue problems during code
1021 	 * patching.
1022 	 */
1023 
1024 	/*
1025 	 * Rewrite the retpolines, must be done before alternatives since
1026 	 * those can rewrite the retpoline thunks.
1027 	 */
1028 	apply_retpolines(__retpoline_sites, __retpoline_sites_end);
1029 	apply_returns(__return_sites, __return_sites_end);
1030 
1031 	apply_alternatives(__alt_instructions, __alt_instructions_end);
1032 
1033 #ifdef CONFIG_SMP
1034 	/* Patch to UP if other cpus not imminent. */
1035 	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
1036 		uniproc_patched = true;
1037 		alternatives_smp_module_add(NULL, "core kernel",
1038 					    __smp_locks, __smp_locks_end,
1039 					    _text, _etext);
1040 	}
1041 
1042 	if (!uniproc_patched || num_possible_cpus() == 1) {
1043 		free_init_pages("SMP alternatives",
1044 				(unsigned long)__smp_locks,
1045 				(unsigned long)__smp_locks_end);
1046 	}
1047 #endif
1048 
1049 	apply_paravirt(__parainstructions, __parainstructions_end);
1050 
1051 	restart_nmi();
1052 	alternatives_patched = 1;
1053 }
1054 
1055 /**
1056  * text_poke_early - Update instructions on a live kernel at boot time
1057  * @addr: address to modify
1058  * @opcode: source of the copy
1059  * @len: length to copy
1060  *
1061  * When you use this code to patch more than one byte of an instruction
1062  * you need to make sure that other CPUs cannot execute this code in parallel.
1063  * Also no thread must be currently preempted in the middle of these
1064  * instructions. And on the local CPU you need to be protected against NMI or
1065  * MCE handlers seeing an inconsistent instruction while you patch.
1066  */
1067 void __init_or_module text_poke_early(void *addr, const void *opcode,
1068 				      size_t len)
1069 {
1070 	unsigned long flags;
1071 
1072 	if (boot_cpu_has(X86_FEATURE_NX) &&
1073 	    is_module_text_address((unsigned long)addr)) {
1074 		/*
1075 		 * Modules text is marked initially as non-executable, so the
1076 		 * code cannot be running and speculative code-fetches are
1077 		 * prevented. Just change the code.
1078 		 */
1079 		memcpy(addr, opcode, len);
1080 	} else {
1081 		local_irq_save(flags);
1082 		memcpy(addr, opcode, len);
1083 		local_irq_restore(flags);
1084 		sync_core();
1085 
1086 		/*
1087 		 * Could also do a CLFLUSH here to speed up CPU recovery; but
1088 		 * that causes hangs on some VIA CPUs.
1089 		 */
1090 	}
1091 }
1092 
1093 typedef struct {
1094 	struct mm_struct *mm;
1095 } temp_mm_state_t;
1096 
1097 /*
1098  * Using a temporary mm allows us to set temporary mappings that are not accessible
1099  * by other CPUs. Such mappings are needed to perform sensitive memory writes
1100  * that override the kernel memory protections (e.g., W^X), without exposing the
1101  * temporary page-table mappings that are required for these write operations to
1102  * other CPUs. Using a temporary mm also allows us to avoid TLB shootdowns when the
1103  * mapping is torn down.
1104  *
1105  * Context: The temporary mm needs to be used exclusively by a single core. To
1106  *          harden security, IRQs must be disabled while the temporary mm is
1107  *          loaded, thereby preventing interrupt handler bugs from overriding
1108  *          the kernel memory protection.
1109  */
1110 static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
1111 {
1112 	temp_mm_state_t temp_state;
1113 
1114 	lockdep_assert_irqs_disabled();
1115 
1116 	/*
1117 	 * Make sure not to be in TLB lazy mode, as otherwise we'll end up
1118 	 * with a stale address space WITHOUT being in lazy mode after
1119 	 * restoring the previous mm.
1120 	 */
1121 	if (this_cpu_read(cpu_tlbstate.is_lazy))
1122 		leave_mm(smp_processor_id());
1123 
1124 	temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
1125 	switch_mm_irqs_off(NULL, mm, current);
1126 
1127 	/*
1128 	 * If breakpoints are enabled, disable them while the temporary mm is
1129 	 * used. Userspace might set up watchpoints on addresses that are used
1130 	 * in the temporary mm, which would lead to wrong signals being sent or
1131 	 * crashes.
1132 	 *
1133 	 * Note that breakpoints are not disabled selectively, which also causes
1134 	 * kernel breakpoints (e.g., perf's) to be disabled. This might be
1135 	 * undesirable, but still seems reasonable as the code that runs in the
1136 	 * temporary mm should be short.
1137 	 */
1138 	if (hw_breakpoint_active())
1139 		hw_breakpoint_disable();
1140 
1141 	return temp_state;
1142 }
1143 
1144 static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
1145 {
1146 	lockdep_assert_irqs_disabled();
1147 	switch_mm_irqs_off(NULL, prev_state.mm, current);
1148 
1149 	/*
1150 	 * Restore the breakpoints if they were disabled before the temporary mm
1151 	 * was loaded.
1152 	 */
1153 	if (hw_breakpoint_active())
1154 		hw_breakpoint_restore();
1155 }
1156 
1157 __ro_after_init struct mm_struct *poking_mm;
1158 __ro_after_init unsigned long poking_addr;
1159 
1160 static void *__text_poke(void *addr, const void *opcode, size_t len)
1161 {
1162 	bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
1163 	struct page *pages[2] = {NULL};
1164 	temp_mm_state_t prev;
1165 	unsigned long flags;
1166 	pte_t pte, *ptep;
1167 	spinlock_t *ptl;
1168 	pgprot_t pgprot;
1169 
1170 	/*
1171 	 * While the boot memory allocator is running we cannot use struct pages as
1172 	 * they are not yet initialized. There is no way to recover.
1173 	 */
1174 	BUG_ON(!after_bootmem);
1175 
1176 	if (!core_kernel_text((unsigned long)addr)) {
1177 		pages[0] = vmalloc_to_page(addr);
1178 		if (cross_page_boundary)
1179 			pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
1180 	} else {
1181 		pages[0] = virt_to_page(addr);
1182 		WARN_ON(!PageReserved(pages[0]));
1183 		if (cross_page_boundary)
1184 			pages[1] = virt_to_page(addr + PAGE_SIZE);
1185 	}
1186 	/*
1187 	 * If something went wrong, crash and burn since recovery paths are not
1188 	 * implemented.
1189 	 */
1190 	BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));
1191 
1192 	/*
1193 	 * Map the page without the global bit, as TLB flushing is done with
1194 	 * flush_tlb_mm_range(), which is intended for non-global PTEs.
1195 	 */
1196 	pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);
1197 
1198 	/*
1199 	 * The lock is not really needed, but this allows us to avoid open-coding.
1200 	 */
1201 	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
1202 
1203 	/*
1204 	 * This must not fail; preallocated in poking_init().
1205 	 */
1206 	VM_BUG_ON(!ptep);
1207 
1208 	local_irq_save(flags);
1209 
1210 	pte = mk_pte(pages[0], pgprot);
1211 	set_pte_at(poking_mm, poking_addr, ptep, pte);
1212 
1213 	if (cross_page_boundary) {
1214 		pte = mk_pte(pages[1], pgprot);
1215 		set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
1216 	}
1217 
1218 	/*
1219 	 * Loading the temporary mm behaves as a compiler barrier, which
1220 	 * guarantees that the PTE will be set at the time memcpy() is done.
1221 	 */
1222 	prev = use_temporary_mm(poking_mm);
1223 
1224 	kasan_disable_current();
1225 	memcpy((u8 *)poking_addr + offset_in_page(addr), opcode, len);
1226 	kasan_enable_current();
1227 
1228 	/*
1229 	 * Ensure that the PTE is only cleared after the instructions of memcpy
1230 	 * were issued by using a compiler barrier.
1231 	 */
1232 	barrier();
1233 
1234 	pte_clear(poking_mm, poking_addr, ptep);
1235 	if (cross_page_boundary)
1236 		pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);
1237 
1238 	/*
1239 	 * Loading the previous page-table hierarchy requires a serializing
1240 	 * instruction that already allows the core to see the updated version.
1241 	 * Xen-PV is assumed to serialize execution in a similar manner.
1242 	 */
1243 	unuse_temporary_mm(prev);
1244 
1245 	/*
1246 	 * Flushing the TLB might involve IPIs, which would require enabled
1247 	 * IRQs, but no IPIs are needed here since the mm is not in use at this point.
1248 	 */
1249 	flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
1250 			   (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
1251 			   PAGE_SHIFT, false);
1252 
1253 	/*
1254 	 * If the text does not match what we just wrote then something is
1255 	 * fundamentally screwy; there's nothing we can really do about that.
1256 	 */
1257 	BUG_ON(memcmp(addr, opcode, len));
1258 
1259 	local_irq_restore(flags);
1260 	pte_unmap_unlock(ptep, ptl);
1261 	return addr;
1262 }
1263 
1264 /**
1265  * text_poke - Update instructions on a live kernel
1266  * @addr: address to modify
1267  * @opcode: source of the copy
1268  * @len: length to copy
1269  *
1270  * Only atomic text poke/set should be allowed when not doing early patching.
1271  * It means the size must be writable atomically and the address must be aligned
1272  * in a way that permits an atomic write. It also makes sure we fit on a single
1273  * page.
1274  *
1275  * Note that the caller must ensure that if the modified code is part of a
1276  * module, the module would not be removed during poking. This can be achieved
1277  * by registering a module notifier, and ordering module removal and patching
1278  * through a mutex.
1279  */
1280 void *text_poke(void *addr, const void *opcode, size_t len)
1281 {
1282 	lockdep_assert_held(&text_mutex);
1283 
1284 	return __text_poke(addr, opcode, len);
1285 }
1286 
1287 /**
1288  * text_poke_kgdb - Update instructions on a live kernel by kgdb
1289  * @addr: address to modify
1290  * @opcode: source of the copy
1291  * @len: length to copy
1292  *
1293  * Only atomic text poke/set should be allowed when not doing early patching.
1294  * It means the size must be writable atomically and the address must be aligned
1295  * in a way that permits an atomic write. It also makes sure we fit on a single
1296  * page.
1297  *
1298  * Context: should only be used by kgdb, which ensures no other core is running,
1299  *	    despite the fact it does not hold the text_mutex.
1300  */
1301 void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
1302 {
1303 	return __text_poke(addr, opcode, len);
1304 }
1305 
1306 static void do_sync_core(void *info)
1307 {
1308 	sync_core();
1309 }
1310 
1311 void text_poke_sync(void)
1312 {
1313 	on_each_cpu(do_sync_core, NULL, 1);
1314 }
1315 
1316 struct text_poke_loc {
1317 	/* addr := _stext + rel_addr */
1318 	s32 rel_addr;
1319 	s32 disp;
1320 	u8 len;
1321 	u8 opcode;
1322 	const u8 text[POKE_MAX_OPCODE_SIZE];
1323 	/* see text_poke_bp_batch() */
1324 	u8 old;
1325 };
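/*
 * Illustrative example (not part of the original source): patching in a
 * 5-byte CALL yields len == 5, opcode == 0xe8 and disp holding the rel32
 * immediate of the emulated instruction; see text_poke_loc_init().
 */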
1326 
1327 struct bp_patching_desc {
1328 	struct text_poke_loc *vec;
1329 	int nr_entries;
1330 	atomic_t refs;
1331 };
1332 
1333 static struct bp_patching_desc bp_desc;
1334 
1335 static __always_inline
1336 struct bp_patching_desc *try_get_desc(void)
1337 {
1338 	struct bp_patching_desc *desc = &bp_desc;
1339 
1340 	if (!arch_atomic_inc_not_zero(&desc->refs))
1341 		return NULL;
1342 
1343 	return desc;
1344 }
1345 
1346 static __always_inline void put_desc(void)
1347 {
1348 	struct bp_patching_desc *desc = &bp_desc;
1349 
1350 	smp_mb__before_atomic();
1351 	arch_atomic_dec(&desc->refs);
1352 }
1353 
1354 static __always_inline void *text_poke_addr(struct text_poke_loc *tp)
1355 {
1356 	return _stext + tp->rel_addr;
1357 }
1358 
1359 static __always_inline int patch_cmp(const void *key, const void *elt)
1360 {
1361 	struct text_poke_loc *tp = (struct text_poke_loc *) elt;
1362 
1363 	if (key < text_poke_addr(tp))
1364 		return -1;
1365 	if (key > text_poke_addr(tp))
1366 		return 1;
1367 	return 0;
1368 }
1369 
1370 noinstr int poke_int3_handler(struct pt_regs *regs)
1371 {
1372 	struct bp_patching_desc *desc;
1373 	struct text_poke_loc *tp;
1374 	int ret = 0;
1375 	void *ip;
1376 
1377 	if (user_mode(regs))
1378 		return 0;
1379 
1380 	/*
1381 	 * Having observed our INT3 instruction, we now must observe
1382 	 * bp_desc with non-zero refcount:
1383 	 *
1384 	 *	bp_desc.refs = 1		INT3
1385 	 *	WMB				RMB
1386 	 *	write INT3			if (bp_desc.refs != 0)
1387 	 */
1388 	smp_rmb();
1389 
1390 	desc = try_get_desc();
1391 	if (!desc)
1392 		return 0;
1393 
1394 	/*
1395 	 * Discount the INT3. See text_poke_bp_batch().
1396 	 */
1397 	ip = (void *) regs->ip - INT3_INSN_SIZE;
1398 
1399 	/*
1400 	 * Skip the binary search if there is a single member in the vector.
1401 	 */
1402 	if (unlikely(desc->nr_entries > 1)) {
1403 		tp = __inline_bsearch(ip, desc->vec, desc->nr_entries,
1404 				      sizeof(struct text_poke_loc),
1405 				      patch_cmp);
1406 		if (!tp)
1407 			goto out_put;
1408 	} else {
1409 		tp = desc->vec;
1410 		if (text_poke_addr(tp) != ip)
1411 			goto out_put;
1412 	}
1413 
1414 	ip += tp->len;
1415 
1416 	switch (tp->opcode) {
1417 	case INT3_INSN_OPCODE:
1418 		/*
1419 		 * Someone poked an explicit INT3, they'll want to handle it,
1420 		 * do not consume.
1421 		 */
1422 		goto out_put;
1423 
1424 	case RET_INSN_OPCODE:
1425 		int3_emulate_ret(regs);
1426 		break;
1427 
1428 	case CALL_INSN_OPCODE:
1429 		int3_emulate_call(regs, (long)ip + tp->disp);
1430 		break;
1431 
1432 	case JMP32_INSN_OPCODE:
1433 	case JMP8_INSN_OPCODE:
1434 		int3_emulate_jmp(regs, (long)ip + tp->disp);
1435 		break;
1436 
1437 	default:
1438 		BUG();
1439 	}
1440 
1441 	ret = 1;
1442 
1443 out_put:
1444 	put_desc();
1445 	return ret;
1446 }
1447 
1448 #define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
1449 static struct text_poke_loc tp_vec[TP_VEC_MAX];
1450 static int tp_vec_nr;
1451 
1452 /**
1453  * text_poke_bp_batch() -- update instructions on live kernel on SMP
1454  * @tp:			vector of instructions to patch
1455  * @nr_entries:		number of entries in the vector
1456  *
1457  * Modify multi-byte instruction by using int3 breakpoint on SMP.
1458  * We completely avoid stop_machine() here, and achieve the
1459  * synchronization using int3 breakpoint.
1460  *
1461  * The way it is done:
1462  *	- For each entry in the vector:
1463  *		- add a int3 trap to the address that will be patched
1464  *	- sync cores
1465  *	- For each entry in the vector:
1466  *		- update all but the first byte of the patched range
1467  *	- sync cores
1468  *	- For each entry in the vector:
1469  *		- replace the first byte (int3) by the first byte of
1470  *		  replacing opcode
1471  *	- sync cores
1472  */
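/*
 * Illustrative walk-through of a single entry (not part of the original
 * source): turning a 5-byte NOP into a CALL first pokes INT3 (0xcc) over
 * the first byte, syncs, then writes the four rel32 tail bytes, syncs,
 * and finally replaces the INT3 with 0xe8; any CPU hitting the site in
 * the meantime lands in poke_int3_handler(), which emulates the new
 * instruction.
 */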
1473 static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
1474 {
1475 	unsigned char int3 = INT3_INSN_OPCODE;
1476 	unsigned int i;
1477 	int do_sync;
1478 
1479 	lockdep_assert_held(&text_mutex);
1480 
1481 	bp_desc.vec = tp;
1482 	bp_desc.nr_entries = nr_entries;
1483 
1484 	/*
1485 	 * Corresponds to the implicit memory barrier in try_get_desc() to
1486 	 * ensure reading a non-zero refcount provides up to date bp_desc data.
1487 	 */
1488 	atomic_set_release(&bp_desc.refs, 1);
1489 
1490 	/*
1491 	 * Corresponding read barrier in int3 notifier for making sure the
1492 	 * nr_entries and handler are correctly ordered wrt. patching.
1493 	 */
1494 	smp_wmb();
1495 
1496 	/*
1497 	 * First step: add a int3 trap to the address that will be patched.
1498 	 */
1499 	for (i = 0; i < nr_entries; i++) {
1500 		tp[i].old = *(u8 *)text_poke_addr(&tp[i]);
1501 		text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE);
1502 	}
1503 
1504 	text_poke_sync();
1505 
1506 	/*
1507 	 * Second step: update all but the first byte of the patched range.
1508 	 */
1509 	for (do_sync = 0, i = 0; i < nr_entries; i++) {
1510 		u8 old[POKE_MAX_OPCODE_SIZE] = { tp[i].old, };
1511 		int len = tp[i].len;
1512 
1513 		if (len - INT3_INSN_SIZE > 0) {
1514 			memcpy(old + INT3_INSN_SIZE,
1515 			       text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
1516 			       len - INT3_INSN_SIZE);
1517 			text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
1518 				  (const char *)tp[i].text + INT3_INSN_SIZE,
1519 				  len - INT3_INSN_SIZE);
1520 			do_sync++;
1521 		}
1522 
1523 		/*
1524 		 * Emit a perf event to record the text poke, primarily to
1525 		 * support Intel PT decoding which must walk the executable code
1526 		 * to reconstruct the trace. The flow up to here is:
1527 		 *   - write INT3 byte
1528 		 *   - IPI-SYNC
1529 		 *   - write instruction tail
1530 		 * At this point the actual control flow will be through the
1531 		 * INT3 and handler and not hit the old or new instruction.
1532 		 * Intel PT outputs FUP/TIP packets for the INT3, so the flow
1533 		 * can still be decoded. Subsequently:
1534 		 *   - emit RECORD_TEXT_POKE with the new instruction
1535 		 *   - IPI-SYNC
1536 		 *   - write first byte
1537 		 *   - IPI-SYNC
1538 		 * So before the text poke event timestamp, the decoder will see
1539 		 * either the old instruction flow or FUP/TIP of INT3. After the
1540 		 * text poke event timestamp, the decoder will see either the
1541 		 * new instruction flow or FUP/TIP of INT3. Thus decoders can
1542 		 * use the timestamp as the point at which to modify the
1543 		 * executable code.
1544 		 * The old instruction is recorded so that the event can be
1545 		 * processed forwards or backwards.
1546 		 */
1547 		perf_event_text_poke(text_poke_addr(&tp[i]), old, len,
1548 				     tp[i].text, len);
1549 	}
1550 
1551 	if (do_sync) {
1552 		/*
1553 		 * According to Intel, this core syncing is very likely
1554 		 * not necessary and we'd be safe even without it. But
1555 		 * better safe than sorry (plus there's not only Intel).
1556 		 */
1557 		text_poke_sync();
1558 	}
1559 
1560 	/*
1561 	 * Third step: replace the first byte (int3) by the first byte of
1562 	 * replacing opcode.
1563 	 */
1564 	for (do_sync = 0, i = 0; i < nr_entries; i++) {
1565 		if (tp[i].text[0] == INT3_INSN_OPCODE)
1566 			continue;
1567 
1568 		text_poke(text_poke_addr(&tp[i]), tp[i].text, INT3_INSN_SIZE);
1569 		do_sync++;
1570 	}
1571 
1572 	if (do_sync)
1573 		text_poke_sync();
1574 
1575 	/*
1576 	 * Remove and wait for refs to be zero.
1577 	 */
1578 	if (!atomic_dec_and_test(&bp_desc.refs))
1579 		atomic_cond_read_acquire(&bp_desc.refs, !VAL);
1580 }
1581 
1582 static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
1583 			       const void *opcode, size_t len, const void *emulate)
1584 {
1585 	struct insn insn;
1586 	int ret, i;
1587 
1588 	memcpy((void *)tp->text, opcode, len);
1589 	if (!emulate)
1590 		emulate = opcode;
1591 
1592 	ret = insn_decode_kernel(&insn, emulate);
1593 	BUG_ON(ret < 0);
1594 
1595 	tp->rel_addr = addr - (void *)_stext;
1596 	tp->len = len;
1597 	tp->opcode = insn.opcode.bytes[0];
1598 
1599 	switch (tp->opcode) {
1600 	case RET_INSN_OPCODE:
1601 	case JMP32_INSN_OPCODE:
1602 	case JMP8_INSN_OPCODE:
1603 		/*
1604 		 * Control flow instructions without implied execution of the
1605 		 * next instruction can be padded with INT3.
1606 		 */
1607 		for (i = insn.length; i < len; i++)
1608 			BUG_ON(tp->text[i] != INT3_INSN_OPCODE);
1609 		break;
1610 
1611 	default:
1612 		BUG_ON(len != insn.length);
1613 	};
1614 
1615 
1616 	switch (tp->opcode) {
1617 	case INT3_INSN_OPCODE:
1618 	case RET_INSN_OPCODE:
1619 		break;
1620 
1621 	case CALL_INSN_OPCODE:
1622 	case JMP32_INSN_OPCODE:
1623 	case JMP8_INSN_OPCODE:
1624 		tp->disp = insn.immediate.value;
1625 		break;
1626 
1627 	default: /* assume NOP */
1628 		switch (len) {
1629 		case 2: /* NOP2 -- emulate as JMP8+0 */
1630 			BUG_ON(memcmp(emulate, ideal_nops[len], len));
1631 			tp->opcode = JMP8_INSN_OPCODE;
1632 			tp->disp = 0;
1633 			break;
1634 
1635 		case 5: /* NOP5 -- emulate as JMP32+0 */
1636 			BUG_ON(memcmp(emulate, ideal_nops[NOP_ATOMIC5], len));
1637 			tp->opcode = JMP32_INSN_OPCODE;
1638 			tp->disp = 0;
1639 			break;
1640 
1641 		default: /* unknown instruction */
1642 			BUG();
1643 		}
1644 		break;
1645 	}
1646 }
1647 
1648 /*
1649  * We hard rely on the tp_vec being ordered; ensure this is so by flushing
1650  * early if needed.
1651  */
1652 static bool tp_order_fail(void *addr)
1653 {
1654 	struct text_poke_loc *tp;
1655 
1656 	if (!tp_vec_nr)
1657 		return false;
1658 
1659 	if (!addr) /* force */
1660 		return true;
1661 
1662 	tp = &tp_vec[tp_vec_nr - 1];
1663 	if ((unsigned long)text_poke_addr(tp) > (unsigned long)addr)
1664 		return true;
1665 
1666 	return false;
1667 }
1668 
1669 static void text_poke_flush(void *addr)
1670 {
1671 	if (tp_vec_nr == TP_VEC_MAX || tp_order_fail(addr)) {
1672 		text_poke_bp_batch(tp_vec, tp_vec_nr);
1673 		tp_vec_nr = 0;
1674 	}
1675 }
1676 
1677 void text_poke_finish(void)
1678 {
1679 	text_poke_flush(NULL);
1680 }
1681 
1682 void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate)
1683 {
1684 	struct text_poke_loc *tp;
1685 
1686 	if (unlikely(system_state == SYSTEM_BOOTING)) {
1687 		text_poke_early(addr, opcode, len);
1688 		return;
1689 	}
1690 
1691 	text_poke_flush(addr);
1692 
1693 	tp = &tp_vec[tp_vec_nr++];
1694 	text_poke_loc_init(tp, addr, opcode, len, emulate);
1695 }
1696 
1697 /**
1698  * text_poke_bp() -- update instructions on live kernel on SMP
1699  * @addr:	address to patch
1700  * @opcode:	opcode of new instruction
1701  * @len:	length to copy
1702  * @handler:	address to jump to when the temporary breakpoint is hit
1703  *
1704  * Update a single instruction with the vector in the stack, avoiding
1705  * dynamically allocated memory. This function should be used when it is
1706  * not possible to allocate memory.
1707  */
1708 void __ref text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate)
1709 {
1710 	struct text_poke_loc tp;
1711 
1712 	if (unlikely(system_state == SYSTEM_BOOTING)) {
1713 		text_poke_early(addr, opcode, len);
1714 		return;
1715 	}
1716 
1717 	text_poke_loc_init(&tp, addr, opcode, len, emulate);
1718 	text_poke_bp_batch(&tp, 1);
1719 }
1720