// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Kernel Probes Jump Optimization (Optprobes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Hitachi Ltd., 2012
 */
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/objtool.h>
#include <linux/pgtable.h>
#include <linux/static_call.h>

#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/nospec-branch.h>

#include "common.h"

unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct optimized_kprobe *op;
	struct kprobe *kp;
	long offs;
	int i;

	for (i = 0; i < JMP32_INSN_SIZE; i++) {
		kp = get_kprobe((void *)addr - i);
		/* This function only handles jump-optimized kprobes */
		if (kp && kprobe_optimized(kp)) {
			op = container_of(kp, struct optimized_kprobe, kp);
			/* If op is optimized or queued for unoptimization */
			if (list_empty(&op->list) || optprobe_queued_unopt(op))
				goto found;
		}
	}

	return addr;
found:
	/*
	 * If the kprobe is optimized, the original bytes after the INT3 may
	 * have been overwritten by the jump's destination displacement. In
	 * that case, the original bytes must be recovered from the
	 * op->optinsn.copied_insn buffer.
	 */
	if (copy_from_kernel_nofault(buf, (void *)addr,
		MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
		return 0UL;

	if (addr == (unsigned long)kp->addr) {
		buf[0] = kp->opcode;
		memcpy(buf + 1, op->optinsn.copied_insn, DISP32_SIZE);
	} else {
		offs = addr - (unsigned long)kp->addr - 1;
		memcpy(buf, op->optinsn.copied_insn + offs, DISP32_SIZE - offs);
	}

	return (unsigned long)buf;
}

static void synthesize_clac(kprobe_opcode_t *addr)
{
	/*
	 * Can't be static_cpu_has() due to how objtool treats this feature bit.
	 * This isn't a fast path anyway.
	 */
	if (!boot_cpu_has(X86_FEATURE_SMAP))
		return;

	/* Replace the NOP3 with CLAC */
	addr[0] = 0x0f;
	addr[1] = 0x01;
	addr[2] = 0xca;
}

/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
{
#ifdef CONFIG_X86_64
	/* movabs $val, %rdi */
	*addr++ = 0x48;
	*addr++ = 0xbf;
#else
	/* movl $val, %eax */
	*addr++ = 0xb8;
#endif
	*(unsigned long *)addr = val;
}

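/*
 * Detour template, copied into each optimized-probe buffer and patched up
 * by arch_prepare_optimized_kprobe(). Rough layout:
 *
 *	save flags and registers
 *	CLAC				(NOP3, patched in when SMAP is on)
 *	1st arg = &optimized_kprobe	(patched by synthesize_set_arg1())
 *	2nd arg = saved pt_regs
 *	call optimized_callback		(patched by synthesize_relcall())
 *	restore registers and flags
 *	<copied original instructions>	(appended after optprobe_template_end)
 *	jmp back into the probed function
 */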
asm (
			".pushsection .rodata\n"
			"optprobe_template_func:\n"
			".pushsection .discard.func_stack_frame_non_standard\n"
			"__func_stack_frame_non_standard_optprobe_template_func:\n"
#ifdef CONFIG_64BIT
			".quad optprobe_template_func\n"
#else
			".long optprobe_template_func\n"
#endif
			".popsection\n"
			".global optprobe_template_entry\n"
			"optprobe_template_entry:\n"
#ifdef CONFIG_X86_64
			/* We don't bother saving the ss register */
			"	pushq %rsp\n"
			"	pushfq\n"
			".global optprobe_template_clac\n"
			"optprobe_template_clac:\n"
			ASM_NOP3
			SAVE_REGS_STRING
			"	movq %rsp, %rsi\n"
			".global optprobe_template_val\n"
			"optprobe_template_val:\n"
			ASM_NOP5
			ASM_NOP5
			".global optprobe_template_call\n"
			"optprobe_template_call:\n"
			ASM_NOP5
			/* Move flags to rsp */
			"	movq 18*8(%rsp), %rdx\n"
			"	movq %rdx, 19*8(%rsp)\n"
			RESTORE_REGS_STRING
			/* Skip flags entry */
			"	addq $8, %rsp\n"
			"	popfq\n"
#else /* CONFIG_X86_32 */
			"	pushl %esp\n"
			"	pushfl\n"
			".global optprobe_template_clac\n"
			"optprobe_template_clac:\n"
			ASM_NOP3
			SAVE_REGS_STRING
			"	movl %esp, %edx\n"
			".global optprobe_template_val\n"
			"optprobe_template_val:\n"
			ASM_NOP5
			".global optprobe_template_call\n"
			"optprobe_template_call:\n"
			ASM_NOP5
			/* Move flags into esp */
			"	movl 14*4(%esp), %edx\n"
			"	movl %edx, 15*4(%esp)\n"
			RESTORE_REGS_STRING
			/* Skip flags entry */
			"	addl $4, %esp\n"
			"	popfl\n"
#endif
			".global optprobe_template_end\n"
			"optprobe_template_end:\n"
			".popsection\n");

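/*
 * Byte offsets of the patch points above, relative to optprobe_template_entry;
 * used below when filling in each detour buffer.
 */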
#define TMPL_CLAC_IDX \
	((long)optprobe_template_clac - (long)optprobe_template_entry)
#define TMPL_MOVE_IDX \
	((long)optprobe_template_val - (long)optprobe_template_entry)
#define TMPL_CALL_IDX \
	((long)optprobe_template_call - (long)optprobe_template_entry)
#define TMPL_END_IDX \
	((long)optprobe_template_end - (long)optprobe_template_entry)

/* Optimized kprobe callback function: called from optinsn */
static void
optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{
	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	preempt_disable();
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
		/* Save skipped registers */
		regs->cs = __KERNEL_CS;
#ifdef CONFIG_X86_32
		regs->gs = 0;
#endif
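		/*
		 * Make regs look like an INT3 trap on the probed address:
		 * ip points just past where the INT3 would sit, so handlers
		 * see the same frame as with an unoptimized kprobe.
		 */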
		regs->ip = (unsigned long)op->kp.addr + INT3_INSN_SIZE;
		regs->orig_ax = ~0UL;

		__this_cpu_write(current_kprobe, &op->kp);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}
	preempt_enable();
}
NOKPROBE_SYMBOL(optimized_callback);

static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
{
	struct insn insn;
	int len = 0, ret;

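	/*
	 * Relocate whole instructions until at least JMP32_INSN_SIZE bytes
	 * are covered, so the 5-byte jump never ends up splitting an
	 * instruction.
	 */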
	while (len < JMP32_INSN_SIZE) {
		ret = __copy_instruction(dest + len, src + len, real + len, &insn);
		if (!ret || !can_boost(&insn, src + len))
			return -EINVAL;
		len += ret;
	}
	/* Check whether the address range is reserved */
	if (ftrace_text_reserved(src, src + len - 1) ||
	    alternatives_text_reserved(src, src + len - 1) ||
	    jump_label_text_reserved(src, src + len - 1) ||
	    static_call_text_reserved(src, src + len - 1))
		return -EBUSY;

	return len;
}

/* Check whether insn is indirect jump */
static int __insn_is_indirect_jump(struct insn *insn)
{
	return ((insn->opcode.bytes[0] == 0xff &&
		(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
		insn->opcode.bytes[0] == 0xea);	/* Segment based jump */
}

/* Check whether insn jumps into specified address range */
static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
{
	unsigned long target = 0;

	switch (insn->opcode.bytes[0]) {
	case 0xe0:	/* loopne */
	case 0xe1:	/* loope */
	case 0xe2:	/* loop */
	case 0xe3:	/* jcxz */
	case 0xe9:	/* near relative jump */
	case 0xeb:	/* short relative jump */
		break;
	case 0x0f:
		if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
			break;
		return 0;
	default:
		if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
			break;
		return 0;
	}
	target = (unsigned long)insn->next_byte + insn->immediate.value;

	return (start <= target && target <= start + len);
}

static int insn_is_indirect_jump(struct insn *insn)
{
	int ret = __insn_is_indirect_jump(insn);

#ifdef CONFIG_RETPOLINE
	/*
	 * Jump to x86_indirect_thunk_* is treated as an indirect jump.
	 * Note that even with CONFIG_RETPOLINE=y, a kernel compiled with
	 * an older gcc may still emit indirect jumps, so this check is in
	 * addition to, not a replacement for, the indirect-jump check.
	 */
	if (!ret)
		ret = insn_jump_into_range(insn,
				(unsigned long)__indirect_thunk_start,
				(unsigned long)__indirect_thunk_end -
				(unsigned long)__indirect_thunk_start);
#endif
	return ret;
}

/* Decode the whole function to ensure no instruction jumps into the target */
static int can_optimize(unsigned long paddr)
{
	unsigned long addr, size = 0, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	/* Lookup symbol including addr */
	if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
		return 0;

	/*
	 * Do not optimize in the entry code due to the unstable
	 * stack handling and registers setup.
	 */
	if (((paddr >= (unsigned long)__entry_text_start) &&
	     (paddr <  (unsigned long)__entry_text_end)))
		return 0;

	/* Check there is enough space for a relative jump. */
	if (size - offset < JMP32_INSN_SIZE)
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr - offset + size) { /* Decode until function end */
		unsigned long recovered_insn;
		int ret;

		if (search_exception_tables(addr))
			/*
			 * Since some fixup code jumps into this function,
			 * we can't optimize a kprobe in this function.
			 */
			return 0;
		recovered_insn = recover_probed_instruction(buf, addr);
		if (!recovered_insn)
			return 0;

		ret = insn_decode(&insn, (void *)recovered_insn, MAX_INSN_SIZE, INSN_MODE_KERN);
		if (ret < 0)
			return 0;
#ifdef CONFIG_KGDB
		/*
		 * If there is a dynamically installed kgdb sw breakpoint,
		 * this function should not be probed.
		 */
		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE &&
		    kgdb_has_hit_break(addr))
			return 0;
#endif
		/* Recover address */
		insn.kaddr = (void *)addr;
		insn.next_byte = (void *)(addr + insn.length);
		/* Check that no instruction jumps into the target range */
		if (insn_is_indirect_jump(&insn) ||
		    insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE,
					 DISP32_SIZE))
			return 0;
		addr += insn.length;
	}

	return 1;
}

/* Check optimized_kprobe can actually be optimized. */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	int i;
	struct kprobe *p;

	for (i = 1; i < op->optinsn.size; i++) {
		p = get_kprobe(op->kp.addr + i);
		if (p && !kprobe_disarmed(p))
			return -EEXIST;
	}

	return 0;
}

/* Check the addr is within the optimized instructions. */
int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 unsigned long addr)
{
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + op->optinsn.size > addr);
}

/* Free optimized instruction slot */
static
void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
	u8 *slot = op->optinsn.insn;
	if (slot) {
		int len = TMPL_END_IDX + op->optinsn.size + JMP32_INSN_SIZE;

		/* Record the perf event before freeing the slot */
		if (dirty)
			perf_event_text_poke(slot, slot, len, NULL, 0);

		free_optinsn_slot(slot, dirty);
		op->optinsn.insn = NULL;
		op->optinsn.size = 0;
	}
}

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	__arch_remove_optimized_kprobe(op, 1);
}

/*
 * Copy the target instructions that will be replaced by the jump.
 * Target instructions MUST be relocatable (checked inside).
 * This is called when a new aggregated (opt)probe is allocated or reused.
 */
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
				  struct kprobe *__unused)
{
	u8 *buf = NULL, *slot;
	int ret, len;
	long rel;

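	/*
	 * The detour buffer is built in 'buf' and then text_poke()'d into
	 * the RO 'slot' as:
	 *	[ template (TMPL_END_IDX bytes)		]
	 *	[ copied original instructions		]
	 *	[ JMP32 back to the probed function	]
	 */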
	if (!can_optimize((unsigned long)op->kp.addr))
		return -EILSEQ;

	buf = kzalloc(MAX_OPTINSN_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	op->optinsn.insn = slot = get_optinsn_slot();
	if (!slot) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Verify if the address gap is in 2GB range, because this uses
	 * a relative jump.
	 */
	rel = (long)slot - (long)op->kp.addr + JMP32_INSN_SIZE;
	if (abs(rel) > 0x7fffffff) {
		ret = -ERANGE;
		goto err;
	}

	/* Copy arch-dep-instance from template */
	memcpy(buf, optprobe_template_entry, TMPL_END_IDX);

	/* Copy instructions into the out-of-line buffer */
	ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr,
					  slot + TMPL_END_IDX);
	if (ret < 0)
		goto err;
	op->optinsn.size = ret;
	len = TMPL_END_IDX + op->optinsn.size;

	synthesize_clac(buf + TMPL_CLAC_IDX);

	/* Set probe information */
	synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);

	/* Set probe function call */
	synthesize_relcall(buf + TMPL_CALL_IDX,
			   slot + TMPL_CALL_IDX, optimized_callback);

	/* Set returning jmp instruction at the tail of out-of-line buffer */
	synthesize_reljump(buf + len, slot + len,
			   (u8 *)op->kp.addr + op->optinsn.size);
	len += JMP32_INSN_SIZE;

	/*
	 * Note: len = TMPL_END_IDX + op->optinsn.size + JMP32_INSN_SIZE is also
	 * used in __arch_remove_optimized_kprobe().
	 */

	/* We have to use text_poke() for instruction buffer because it is RO */
	perf_event_text_poke(slot, NULL, 0, buf, len);
	text_poke(slot, buf, len);

	ret = 0;
out:
	kfree(buf);
	return ret;

err:
	__arch_remove_optimized_kprobe(op, 0);
	goto out;
}

/*
 * Replace breakpoints (INT3) with relative jumps (JMP.d32).
 * Caller must hold kprobe_mutex and text_mutex.
 *
 * The caller will have installed a regular kprobe and after that issued
 * synchronize_rcu_tasks(); this ensures that the instruction(s) that live in
 * the 4 bytes after the INT3 are unused and can now be overwritten.
 */
void arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op, *tmp;
	u8 insn_buff[JMP32_INSN_SIZE];

	list_for_each_entry_safe(op, tmp, oplist, list) {
		s32 rel = (s32)((long)op->optinsn.insn -
			((long)op->kp.addr + JMP32_INSN_SIZE));

		WARN_ON(kprobe_disabled(&op->kp));

		/* Backup instructions which will be replaced by jump address */
		memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_INSN_SIZE,
		       DISP32_SIZE);

		insn_buff[0] = JMP32_INSN_OPCODE;
		*(s32 *)(&insn_buff[1]) = rel;

		text_poke_bp(op->kp.addr, insn_buff, JMP32_INSN_SIZE, NULL);

		list_del_init(&op->list);
	}
}

/*
 * Replace a relative jump (JMP.d32) with a breakpoint (INT3).
 *
 * After that, we can restore the 4 bytes after the INT3 to undo what
 * arch_optimize_kprobes() scribbled. This is safe since those bytes will be
 * unused once the INT3 lands.
 */
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	u8 new[JMP32_INSN_SIZE] = { INT3_INSN_OPCODE, };
	u8 old[JMP32_INSN_SIZE];
	u8 *addr = op->kp.addr;

	memcpy(old, op->kp.addr, JMP32_INSN_SIZE);
	memcpy(new + INT3_INSN_SIZE,
	       op->optinsn.copied_insn,
	       JMP32_INSN_SIZE - INT3_INSN_SIZE);

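	/*
	 * Patch in two steps, mirroring text_poke_bp(): land the INT3 first
	 * and sync, then restore the remaining bytes, so no CPU can execute
	 * a half-patched instruction.
	 */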
	text_poke(addr, new, INT3_INSN_SIZE);
	text_poke_sync();
	text_poke(addr + INT3_INSN_SIZE,
		  new + INT3_INSN_SIZE,
		  JMP32_INSN_SIZE - INT3_INSN_SIZE);
	text_poke_sync();

	perf_event_text_poke(op->kp.addr, old, JMP32_INSN_SIZE, new, JMP32_INSN_SIZE);
}

/*
 * Recover original instructions and breakpoints from relative jumps.
 * Caller must hold kprobe_mutex.
 */
extern void arch_unoptimize_kprobes(struct list_head *oplist,
				    struct list_head *done_list)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
{
	struct optimized_kprobe *op;

	if (p->flags & KPROBE_FLAG_OPTIMIZED) {
		/* This kprobe is really able to run optimized path. */
		op = container_of(p, struct optimized_kprobe, kp);
		/* Detour through copied instructions */
		regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
		if (!reenter)
			reset_current_kprobe();
		return 1;
	}
	return 0;
}
NOKPROBE_SYMBOL(setup_detour_execution);