1 #define pr_fmt(fmt) "SMP alternatives: " fmt
2 
3 #include <linux/module.h>
4 #include <linux/sched.h>
5 #include <linux/mutex.h>
6 #include <linux/list.h>
7 #include <linux/stringify.h>
8 #include <linux/mm.h>
9 #include <linux/vmalloc.h>
10 #include <linux/memory.h>
11 #include <linux/stop_machine.h>
12 #include <linux/slab.h>
13 #include <linux/kdebug.h>
14 #include <asm/alternative.h>
15 #include <asm/sections.h>
16 #include <asm/pgtable.h>
17 #include <asm/mce.h>
18 #include <asm/nmi.h>
19 #include <asm/cacheflush.h>
20 #include <asm/tlbflush.h>
21 #include <asm/io.h>
22 #include <asm/fixmap.h>
23 
24 int __read_mostly alternatives_patched;
25 
26 EXPORT_SYMBOL_GPL(alternatives_patched);
27 
28 #define MAX_PATCH_LEN (255-1)
29 
30 static int __initdata_or_module debug_alternative;
31 
32 static int __init debug_alt(char *str)
33 {
34 	debug_alternative = 1;
35 	return 1;
36 }
37 __setup("debug-alternative", debug_alt);
38 
39 static int noreplace_smp;
40 
41 static int __init setup_noreplace_smp(char *str)
42 {
43 	noreplace_smp = 1;
44 	return 1;
45 }
46 __setup("noreplace-smp", setup_noreplace_smp);
47 
48 #define DPRINTK(fmt, args...)						\
49 do {									\
50 	if (debug_alternative)						\
51 		printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);	\
52 } while (0)
53 
54 #define DUMP_BYTES(buf, len, fmt, args...)				\
55 do {									\
56 	if (unlikely(debug_alternative)) {				\
57 		int j;							\
58 									\
59 		if (!(len))						\
60 			break;						\
61 									\
62 		printk(KERN_DEBUG fmt, ##args);				\
63 		for (j = 0; j < (len) - 1; j++)				\
64 			printk(KERN_CONT "%02hhx ", buf[j]);		\
65 		printk(KERN_CONT "%02hhx\n", buf[j]);			\
66 	}								\
67 } while (0)
68 
69 /*
70  * Each GENERIC_NOPX is X bytes long and is defined as an array of bytes
71  * that make up that nop. To get from one nop to the next, index into the
72  * array at an offset equal to the sum of the sizes of all the nops that
73  * precede the one we are after.
74  *
75  * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
76  * nice symmetry of sizes of the previous nops.
77  */
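/*
 * Worked example (illustrative note, not from the original source): with
 * the sizes above, the 4-byte nop starts at offset 1 + 2 + 3 = 6 into the
 * flat byte array, which is why slot 4 of each per-length lookup table
 * below stores "nops + 1 + 2 + 3"; slot 0 is unused and stays NULL.
 */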
78 #if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
79 static const unsigned char intelnops[] =
80 {
81 	GENERIC_NOP1,
82 	GENERIC_NOP2,
83 	GENERIC_NOP3,
84 	GENERIC_NOP4,
85 	GENERIC_NOP5,
86 	GENERIC_NOP6,
87 	GENERIC_NOP7,
88 	GENERIC_NOP8,
89 	GENERIC_NOP5_ATOMIC
90 };
91 static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
92 {
93 	NULL,
94 	intelnops,
95 	intelnops + 1,
96 	intelnops + 1 + 2,
97 	intelnops + 1 + 2 + 3,
98 	intelnops + 1 + 2 + 3 + 4,
99 	intelnops + 1 + 2 + 3 + 4 + 5,
100 	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
101 	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
102 	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
103 };
104 #endif
105 
106 #ifdef K8_NOP1
107 static const unsigned char k8nops[] =
108 {
109 	K8_NOP1,
110 	K8_NOP2,
111 	K8_NOP3,
112 	K8_NOP4,
113 	K8_NOP5,
114 	K8_NOP6,
115 	K8_NOP7,
116 	K8_NOP8,
117 	K8_NOP5_ATOMIC
118 };
119 static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
120 {
121 	NULL,
122 	k8nops,
123 	k8nops + 1,
124 	k8nops + 1 + 2,
125 	k8nops + 1 + 2 + 3,
126 	k8nops + 1 + 2 + 3 + 4,
127 	k8nops + 1 + 2 + 3 + 4 + 5,
128 	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
129 	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
130 	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
131 };
132 #endif
133 
134 #if defined(K7_NOP1) && !defined(CONFIG_X86_64)
135 static const unsigned char k7nops[] =
136 {
137 	K7_NOP1,
138 	K7_NOP2,
139 	K7_NOP3,
140 	K7_NOP4,
141 	K7_NOP5,
142 	K7_NOP6,
143 	K7_NOP7,
144 	K7_NOP8,
145 	K7_NOP5_ATOMIC
146 };
147 static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
148 {
149 	NULL,
150 	k7nops,
151 	k7nops + 1,
152 	k7nops + 1 + 2,
153 	k7nops + 1 + 2 + 3,
154 	k7nops + 1 + 2 + 3 + 4,
155 	k7nops + 1 + 2 + 3 + 4 + 5,
156 	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
157 	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
158 	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
159 };
160 #endif
161 
162 #ifdef P6_NOP1
163 static const unsigned char p6nops[] =
164 {
165 	P6_NOP1,
166 	P6_NOP2,
167 	P6_NOP3,
168 	P6_NOP4,
169 	P6_NOP5,
170 	P6_NOP6,
171 	P6_NOP7,
172 	P6_NOP8,
173 	P6_NOP5_ATOMIC
174 };
175 static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
176 {
177 	NULL,
178 	p6nops,
179 	p6nops + 1,
180 	p6nops + 1 + 2,
181 	p6nops + 1 + 2 + 3,
182 	p6nops + 1 + 2 + 3 + 4,
183 	p6nops + 1 + 2 + 3 + 4 + 5,
184 	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
185 	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
186 	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
187 };
188 #endif
189 
190 /* Initialize these to a safe default */
191 #ifdef CONFIG_X86_64
192 const unsigned char * const *ideal_nops = p6_nops;
193 #else
194 const unsigned char * const *ideal_nops = intel_nops;
195 #endif
196 
197 void __init arch_init_ideal_nops(void)
198 {
199 	switch (boot_cpu_data.x86_vendor) {
200 	case X86_VENDOR_INTEL:
201 		/*
202 		 * Due to a decoder implementation quirk, some
203 		 * specific Intel CPUs actually perform better with
204 		 * the "k8_nops" than with the SDM-recommended NOPs.
205 		 */
206 		if (boot_cpu_data.x86 == 6 &&
207 		    boot_cpu_data.x86_model >= 0x0f &&
208 		    boot_cpu_data.x86_model != 0x1c &&
209 		    boot_cpu_data.x86_model != 0x26 &&
210 		    boot_cpu_data.x86_model != 0x27 &&
211 		    boot_cpu_data.x86_model < 0x30) {
212 			ideal_nops = k8_nops;
213 		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
214 			   ideal_nops = p6_nops;
215 		} else {
216 #ifdef CONFIG_X86_64
217 			ideal_nops = k8_nops;
218 #else
219 			ideal_nops = intel_nops;
220 #endif
221 		}
222 		break;
223 
224 	case X86_VENDOR_AMD:
225 		if (boot_cpu_data.x86 > 0xf) {
226 			ideal_nops = p6_nops;
227 			return;
228 		}
229 
230 		/* fall through */
231 
232 	default:
233 #ifdef CONFIG_X86_64
234 		ideal_nops = k8_nops;
235 #else
236 		if (boot_cpu_has(X86_FEATURE_K8))
237 			ideal_nops = k8_nops;
238 		else if (boot_cpu_has(X86_FEATURE_K7))
239 			ideal_nops = k7_nops;
240 		else
241 			ideal_nops = intel_nops;
242 #endif
243 	}
244 }
245 
246 /* Use this to add nops to a buffer, then text_poke the whole buffer. */
247 static void __init_or_module add_nops(void *insns, unsigned int len)
248 {
249 	while (len > 0) {
250 		unsigned int noplen = len;
251 		if (noplen > ASM_NOP_MAX)
252 			noplen = ASM_NOP_MAX;
253 		memcpy(insns, ideal_nops[noplen], noplen);
254 		insns += noplen;
255 		len -= noplen;
256 	}
257 }
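/*
 * Example (illustrative; assumes ASM_NOP_MAX == 8 as on x86): for
 * len == 11, the loop above copies one 8-byte nop from ideal_nops[8]
 * followed by one 3-byte nop from ideal_nops[3].
 */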
258 
259 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
260 extern s32 __smp_locks[], __smp_locks_end[];
261 void *text_poke_early(void *addr, const void *opcode, size_t len);
262 
263 /*
264  * Are we looking at a near JMP with a 1- or 4-byte displacement?
265  */
266 static inline bool is_jmp(const u8 opcode)
267 {
268 	return opcode == 0xeb || opcode == 0xe9;
269 }
270 
271 static void __init_or_module
272 recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
273 {
274 	u8 *next_rip, *tgt_rip;
275 	s32 n_dspl, o_dspl;
276 	int repl_len;
277 
278 	if (a->replacementlen != 5)
279 		return;
280 
281 	o_dspl = *(s32 *)(insnbuf + 1);
282 
283 	/* next_rip of the replacement JMP */
284 	next_rip = repl_insn + a->replacementlen;
285 	/* target rip of the replacement JMP */
286 	tgt_rip  = next_rip + o_dspl;
287 	n_dspl = tgt_rip - orig_insn;
288 
289 	DPRINTK("target RIP: %p, new_displ: 0x%x", tgt_rip, n_dspl);
290 
291 	if (tgt_rip - orig_insn >= 0) {
292 		if (n_dspl - 2 <= 127)
293 			goto two_byte_jmp;
294 		else
295 			goto five_byte_jmp;
296 	/* negative offset */
297 	} else {
298 		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
299 			goto two_byte_jmp;
300 		else
301 			goto five_byte_jmp;
302 	}
303 
304 two_byte_jmp:
305 	n_dspl -= 2;
306 
307 	insnbuf[0] = 0xeb;
308 	insnbuf[1] = (s8)n_dspl;
309 	add_nops(insnbuf + 2, 3);
310 
311 	repl_len = 2;
312 	goto done;
313 
314 five_byte_jmp:
315 	n_dspl -= 5;
316 
317 	insnbuf[0] = 0xe9;
318 	*(s32 *)&insnbuf[1] = n_dspl;
319 
320 	repl_len = 5;
321 
322 done:
323 
324 	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
325 		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
326 }
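/*
 * Sketch of the fixup above (illustrative): the replacement "e9 <rel32>"
 * targets tgt_rip = next_rip + o_dspl as laid out in the replacement
 * section.  Re-encoded at orig_insn, the displacement becomes
 * tgt_rip - orig_insn minus 2 for the short "eb <rel8>" form (padded
 * with nops) or minus 5 for the near "e9 <rel32>" form, i.e. it is
 * counted from the end of the new instruction.
 */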
327 
328 static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr)
329 {
330 	unsigned long flags;
331 	int i;
332 
333 	for (i = 0; i < a->padlen; i++) {
334 		if (instr[i] != 0x90)
335 			return;
336 	}
337 
338 	local_irq_save(flags);
339 	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
340 	sync_core();
341 	local_irq_restore(flags);
342 
343 	DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
344 		   instr, a->instrlen - a->padlen, a->padlen);
345 }
346 
347 /*
348  * Replace instructions with better alternatives for this CPU type. This runs
349  * before SMP is initialized to avoid SMP problems with self modifying code.
350  * This implies that asymmetric systems where APs have fewer capabilities than
351  * the boot processor are not handled. Tough. Make sure you disable such
352  * features by hand.
353  */
354 void __init_or_module apply_alternatives(struct alt_instr *start,
355 					 struct alt_instr *end)
356 {
357 	struct alt_instr *a;
358 	u8 *instr, *replacement;
359 	u8 insnbuf[MAX_PATCH_LEN];
360 
361 	DPRINTK("alt table %p -> %p", start, end);
362 	/*
363	 * The scan order should be from start to end. An alternative scanned
364	 * later can overwrite code patched by an earlier alternative.
365 	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
366 	 * patch code.
367 	 *
368 	 * So be careful if you want to change the scan order to any other
369 	 * order.
370 	 */
371 	for (a = start; a < end; a++) {
372 		int insnbuf_sz = 0;
373 
374 		instr = (u8 *)&a->instr_offset + a->instr_offset;
375 		replacement = (u8 *)&a->repl_offset + a->repl_offset;
376 		BUG_ON(a->instrlen > sizeof(insnbuf));
377 		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
378 		if (!boot_cpu_has(a->cpuid)) {
379 			if (a->padlen > 1)
380 				optimize_nops(a, instr);
381 
382 			continue;
383 		}
384 
385 		DPRINTK("feat: %d*32+%d, old: (%p, len: %d), repl: (%p, len: %d), pad: %d",
386 			a->cpuid >> 5,
387 			a->cpuid & 0x1f,
388 			instr, a->instrlen,
389 			replacement, a->replacementlen, a->padlen);
390 
391 		DUMP_BYTES(instr, a->instrlen, "%p: old_insn: ", instr);
392 		DUMP_BYTES(replacement, a->replacementlen, "%p: rpl_insn: ", replacement);
393 
394 		memcpy(insnbuf, replacement, a->replacementlen);
395 		insnbuf_sz = a->replacementlen;
396 
397 		/* 0xe8 is a relative CALL; fix the offset. */
398 		if (*insnbuf == 0xe8 && a->replacementlen == 5) {
399 			*(s32 *)(insnbuf + 1) += replacement - instr;
400 			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
401 				*(s32 *)(insnbuf + 1),
402 				(unsigned long)instr + *(s32 *)(insnbuf + 1) + 5);
403 		}
404 
405 		if (a->replacementlen && is_jmp(replacement[0]))
406 			recompute_jump(a, instr, replacement, insnbuf);
407 
408 		if (a->instrlen > a->replacementlen) {
409 			add_nops(insnbuf + a->replacementlen,
410 				 a->instrlen - a->replacementlen);
411 			insnbuf_sz += a->instrlen - a->replacementlen;
412 		}
413 		DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", instr);
414 
415 		text_poke_early(instr, insnbuf, insnbuf_sz);
416 	}
417 }
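/*
 * Note on the pointer arithmetic above (descriptive, not from the
 * original source): instr_offset and repl_offset are s32 values relative
 * to the address of the field itself, so "(u8 *)&a->instr_offset +
 * a->instr_offset" recovers the absolute address of the original
 * instruction without storing absolute pointers in the alt table.
 */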
418 
419 #ifdef CONFIG_SMP
420 static void alternatives_smp_lock(const s32 *start, const s32 *end,
421 				  u8 *text, u8 *text_end)
422 {
423 	const s32 *poff;
424 
425 	mutex_lock(&text_mutex);
426 	for (poff = start; poff < end; poff++) {
427 		u8 *ptr = (u8 *)poff + *poff;
428 
429 		if (!*poff || ptr < text || ptr >= text_end)
430 			continue;
431 		/* turn DS segment override prefix into lock prefix */
432 		if (*ptr == 0x3e)
433 			text_poke(ptr, ((unsigned char []){0xf0}), 1);
434 	}
435 	mutex_unlock(&text_mutex);
436 }
437 
438 static void alternatives_smp_unlock(const s32 *start, const s32 *end,
439 				    u8 *text, u8 *text_end)
440 {
441 	const s32 *poff;
442 
443 	mutex_lock(&text_mutex);
444 	for (poff = start; poff < end; poff++) {
445 		u8 *ptr = (u8 *)poff + *poff;
446 
447 		if (!*poff || ptr < text || ptr >= text_end)
448 			continue;
449 		/* turn lock prefix into DS segment override prefix */
450 		if (*ptr == 0xf0)
451 			text_poke(ptr, ((unsigned char []){0x3E}), 1);
452 	}
453 	mutex_unlock(&text_mutex);
454 }
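/*
 * Concrete effect of the two helpers above (illustrative): each entry in
 * the locks table points at a single prefix byte, either 0xf0 (lock) or
 * 0x3e (ds override).  On a UP kernel the lock prefix is rewritten to the
 * harmless ds prefix, and alternatives_enable_smp() restores it once a
 * second CPU can come online.
 */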
455 
456 struct smp_alt_module {
457 	/* what is this ??? */
458 	struct module	*mod;
459 	char		*name;
460 
461 	/* ptrs to lock prefixes */
462 	const s32	*locks;
463 	const s32	*locks_end;
464 
465 	/* .text segment, needed to avoid patching init code ;) */
466 	u8		*text;
467 	u8		*text_end;
468 
469 	struct list_head next;
470 };
471 static LIST_HEAD(smp_alt_modules);
472 static DEFINE_MUTEX(smp_alt);
473 static bool uniproc_patched = false;	/* protected by smp_alt */
474 
475 void __init_or_module alternatives_smp_module_add(struct module *mod,
476 						  char *name,
477 						  void *locks, void *locks_end,
478 						  void *text,  void *text_end)
479 {
480 	struct smp_alt_module *smp;
481 
482 	mutex_lock(&smp_alt);
483 	if (!uniproc_patched)
484 		goto unlock;
485 
486 	if (num_possible_cpus() == 1)
487 		/* Don't bother remembering, we'll never have to undo it. */
488 		goto smp_unlock;
489 
490 	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
491 	if (NULL == smp)
492 		/* we'll run the (safe but slow) SMP code then ... */
493 		goto unlock;
494 
495 	smp->mod	= mod;
496 	smp->name	= name;
497 	smp->locks	= locks;
498 	smp->locks_end	= locks_end;
499 	smp->text	= text;
500 	smp->text_end	= text_end;
501 	DPRINTK("locks %p -> %p, text %p -> %p, name %s\n",
502 		smp->locks, smp->locks_end,
503 		smp->text, smp->text_end, smp->name);
504 
505 	list_add_tail(&smp->next, &smp_alt_modules);
506 smp_unlock:
507 	alternatives_smp_unlock(locks, locks_end, text, text_end);
508 unlock:
509 	mutex_unlock(&smp_alt);
510 }
511 
512 void __init_or_module alternatives_smp_module_del(struct module *mod)
513 {
514 	struct smp_alt_module *item;
515 
516 	mutex_lock(&smp_alt);
517 	list_for_each_entry(item, &smp_alt_modules, next) {
518 		if (mod != item->mod)
519 			continue;
520 		list_del(&item->next);
521 		kfree(item);
522 		break;
523 	}
524 	mutex_unlock(&smp_alt);
525 }
526 
527 void alternatives_enable_smp(void)
528 {
529 	struct smp_alt_module *mod;
530 
531 	/* Why bother if there are no other CPUs? */
532 	BUG_ON(num_possible_cpus() == 1);
533 
534 	mutex_lock(&smp_alt);
535 
536 	if (uniproc_patched) {
537 		pr_info("switching to SMP code\n");
538 		BUG_ON(num_online_cpus() != 1);
539 		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
540 		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
541 		list_for_each_entry(mod, &smp_alt_modules, next)
542 			alternatives_smp_lock(mod->locks, mod->locks_end,
543 					      mod->text, mod->text_end);
544 		uniproc_patched = false;
545 	}
546 	mutex_unlock(&smp_alt);
547 }
548 
549 /* Return 1 if the address range is reserved for smp-alternatives */
550 int alternatives_text_reserved(void *start, void *end)
551 {
552 	struct smp_alt_module *mod;
553 	const s32 *poff;
554 	u8 *text_start = start;
555 	u8 *text_end = end;
556 
557 	list_for_each_entry(mod, &smp_alt_modules, next) {
558 		if (mod->text > text_end || mod->text_end < text_start)
559 			continue;
560 		for (poff = mod->locks; poff < mod->locks_end; poff++) {
561 			const u8 *ptr = (const u8 *)poff + *poff;
562 
563 			if (text_start <= ptr && text_end > ptr)
564 				return 1;
565 		}
566 	}
567 
568 	return 0;
569 }
570 #endif /* CONFIG_SMP */
571 
572 #ifdef CONFIG_PARAVIRT
573 void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
574 				     struct paravirt_patch_site *end)
575 {
576 	struct paravirt_patch_site *p;
577 	char insnbuf[MAX_PATCH_LEN];
578 
579 	for (p = start; p < end; p++) {
580 		unsigned int used;
581 
582 		BUG_ON(p->len > MAX_PATCH_LEN);
583 		/* prep the buffer with the original instructions */
584 		memcpy(insnbuf, p->instr, p->len);
585 		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
586 					 (unsigned long)p->instr, p->len);
587 
588 		BUG_ON(used > p->len);
589 
590 		/* Pad the rest with nops */
591 		add_nops(insnbuf + used, p->len - used);
592 		text_poke_early(p->instr, insnbuf, p->len);
593 	}
594 }
595 extern struct paravirt_patch_site __start_parainstructions[],
596 	__stop_parainstructions[];
597 #endif	/* CONFIG_PARAVIRT */
598 
599 void __init alternative_instructions(void)
600 {
601 	/* The patching is not fully atomic, so try to avoid local interrupts
602 	   that might execute the code being patched.
603 	   Other CPUs are not running. */
604 	stop_nmi();
605 
606 	/*
607 	 * Don't stop machine check exceptions while patching.
608 	 * MCEs only happen when something got corrupted and in this
609 	 * case we must do something about the corruption.
610 	 * Ignoring it is worse than an unlikely patching race.
611 	 * Also machine checks tend to be broadcast and if one CPU
612 	 * goes into machine check the others follow quickly, so we don't
613 	 * expect a machine check to cause undue problems during to code
614 	 * patching.
615 	 */
616 
617 	apply_alternatives(__alt_instructions, __alt_instructions_end);
618 
619 #ifdef CONFIG_SMP
620 	/* Patch to UP if other CPUs are not imminent. */
621 	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
622 		uniproc_patched = true;
623 		alternatives_smp_module_add(NULL, "core kernel",
624 					    __smp_locks, __smp_locks_end,
625 					    _text, _etext);
626 	}
627 
628 	if (!uniproc_patched || num_possible_cpus() == 1)
629 		free_init_pages("SMP alternatives",
630 				(unsigned long)__smp_locks,
631 				(unsigned long)__smp_locks_end);
632 #endif
633 
634 	apply_paravirt(__parainstructions, __parainstructions_end);
635 
636 	restart_nmi();
637 	alternatives_patched = 1;
638 }
639 
640 /**
641  * text_poke_early - Update instructions on a live kernel at boot time
642  * @addr: address to modify
643  * @opcode: source of the copy
644  * @len: length to copy
645  *
646  * When you use this code to patch more than one byte of an instruction
647  * you need to make sure that other CPUs cannot execute this code in parallel.
648  * Also no thread must be currently preempted in the middle of these
649  * instructions. And on the local CPU you need to be protected against NMI or MCE
650  * handlers seeing an inconsistent instruction while you patch.
651  */
652 void *__init_or_module text_poke_early(void *addr, const void *opcode,
653 					      size_t len)
654 {
655 	unsigned long flags;
656 	local_irq_save(flags);
657 	memcpy(addr, opcode, len);
658 	sync_core();
659 	local_irq_restore(flags);
660 	/* Could also do a CLFLUSH here to speed up CPU recovery; but
661 	   that causes hangs on some VIA CPUs. */
662 	return addr;
663 }
664 
665 /**
666  * text_poke - Update instructions on a live kernel
667  * @addr: address to modify
668  * @opcode: source of the copy
669  * @len: length to copy
670  *
671  * Only atomic text poke/set should be allowed when not doing early patching.
672  * It means the size must be writable atomically and the address must be aligned
673  * in a way that permits an atomic write. It also makes sure we fit on a single
674  * page.
675  *
676  * Note: Must be called under text_mutex.
677  */
678 void *text_poke(void *addr, const void *opcode, size_t len)
679 {
680 	unsigned long flags;
681 	char *vaddr;
682 	struct page *pages[2];
683 	int i;
684 
685 	if (!core_kernel_text((unsigned long)addr)) {
686 		pages[0] = vmalloc_to_page(addr);
687 		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
688 	} else {
689 		pages[0] = virt_to_page(addr);
690 		WARN_ON(!PageReserved(pages[0]));
691 		pages[1] = virt_to_page(addr + PAGE_SIZE);
692 	}
693 	BUG_ON(!pages[0]);
694 	local_irq_save(flags);
695 	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
696 	if (pages[1])
697 		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
698 	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
699 	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
700 	clear_fixmap(FIX_TEXT_POKE0);
701 	if (pages[1])
702 		clear_fixmap(FIX_TEXT_POKE1);
703 	local_flush_tlb();
704 	sync_core();
705 	/* Could also do a CLFLUSH here to speed up CPU recovery; but
706 	   that causes hangs on some VIA CPUs. */
707 	for (i = 0; i < len; i++)
708 		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
709 	local_irq_restore(flags);
710 	return addr;
711 }
712 
713 static void do_sync_core(void *info)
714 {
715 	sync_core();
716 }
717 
718 static bool bp_patching_in_progress;
719 static void *bp_int3_handler, *bp_int3_addr;
720 
721 int poke_int3_handler(struct pt_regs *regs)
722 {
723 	/* bp_patching_in_progress */
724 	smp_rmb();
725 
726 	if (likely(!bp_patching_in_progress))
727 		return 0;
728 
729 	if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
730 		return 0;
731 
732 	/* set up the specified breakpoint handler */
733 	regs->ip = (unsigned long) bp_int3_handler;
734 
735 	return 1;
736 
737 }
738 
739 /**
740  * text_poke_bp() -- update instructions on live kernel on SMP
741  * @addr:	address to patch
742  * @opcode:	opcode of new instruction
743  * @len:	length to copy
744  * @handler:	address to jump to when the temporary breakpoint is hit
745  *
746  * Modify multi-byte instruction by using int3 breakpoint on SMP.
747  * We completely avoid stop_machine() here, and achieve the
748  * synchronization using int3 breakpoint.
749  *
750  * The way it is done:
751  *	- add an int3 trap to the address that will be patched
752  *	- sync cores
753  *	- update all but the first byte of the patched range
754  *	- sync cores
755  *	- replace the first byte (int3) with the first byte of the
756  *	  replacement opcode
757  *	- sync cores
758  *
759  * Note: must be called under text_mutex.
760  */
761 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
762 {
763 	unsigned char int3 = 0xcc;
764 
765 	bp_int3_handler = handler;
766 	bp_int3_addr = (u8 *)addr + sizeof(int3);
767 	bp_patching_in_progress = true;
768 	/*
769 	 * Corresponding read barrier in int3 notifier for
770 	 * making sure the in_progress flag is correctly ordered wrt.
771 	 * patching
772 	 */
773 	smp_wmb();
774 
775 	text_poke(addr, &int3, sizeof(int3));
776 
777 	on_each_cpu(do_sync_core, NULL, 1);
778 
779 	if (len - sizeof(int3) > 0) {
780 		/* patch all but the first byte */
781 		text_poke((char *)addr + sizeof(int3),
782 			  (const char *) opcode + sizeof(int3),
783 			  len - sizeof(int3));
784 		/*
785 		 * According to Intel, this core syncing is very likely
786 		 * not necessary and we'd be safe even without it. But
787 		 * better safe than sorry (plus there's not only Intel).
788 		 */
789 		on_each_cpu(do_sync_core, NULL, 1);
790 	}
791 
792 	/* patch the first byte */
793 	text_poke(addr, opcode, sizeof(int3));
794 
795 	on_each_cpu(do_sync_core, NULL, 1);
796 
797 	bp_patching_in_progress = false;
798 	smp_wmb();
799 
800 	return addr;
801 }
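/*
 * Usage sketch (hypothetical caller, not part of this file): patching a
 * 5-byte site would look roughly like
 *
 *	text_poke_bp(ip, new_insn, 5, ip + 5);
 *
 * with text_mutex held.  Any CPU that executes the site mid-patch hits
 * the int3, poke_int3_handler() redirects it to the handler address
 * (here, right past the patched range), and execution continues safely.
 */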
802 
803