/*
 *  Kernel Probes Jump Optimization (Optprobes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Hitachi Ltd., 2012
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>
#include <linux/frame.h>

#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/nospec-branch.h>

#include "common.h"

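/*
 * Rough picture of the mechanism implemented below: jump optimization
 * replaces the single-byte int3 of an armed kprobe with a 5-byte relative
 * jump into a per-probe out-of-line buffer (the "detour" trampoline).
 * The trampoline saves registers, calls optimized_callback() with the
 * optimized_kprobe and a synthesized pt_regs, restores registers, runs
 * the relocated original instructions, and jumps back behind the probe.
 * This skips the int3 trap and single-step round trip entirely.
 */
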
unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct optimized_kprobe *op;
	struct kprobe *kp;
	long offs;
	int i;

	for (i = 0; i < RELATIVEJUMP_SIZE; i++) {
		kp = get_kprobe((void *)addr - i);
		/* This function only handles jump-optimized kprobes */
		if (kp && kprobe_optimized(kp)) {
			op = container_of(kp, struct optimized_kprobe, kp);
			/* If op->list is not empty, op is still being (un)optimized */
			if (list_empty(&op->list))
				goto found;
		}
	}

	return addr;
found:
	/*
	 * If the kprobe is optimized, its original bytes have been
	 * overwritten by the jump destination address, so they must be
	 * recovered from the op->optinsn.copied_insn buffer.
	 */
	if (probe_kernel_read(buf, (void *)addr,
		MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
		return 0UL;

	if (addr == (unsigned long)kp->addr) {
		buf[0] = kp->opcode;
		memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
	} else {
		offs = addr - (unsigned long)kp->addr - 1;
		memcpy(buf, op->optinsn.copied_insn + offs, RELATIVE_ADDR_SIZE - offs);
	}

	return (unsigned long)buf;
}

/* Insert a move instruction which loads a pointer into eax/rdi (the 1st argument). */
static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
{
#ifdef CONFIG_X86_64
	*addr++ = 0x48;
	*addr++ = 0xbf;
#else
	*addr++ = 0xb8;
#endif
	*(unsigned long *)addr = val;
}
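
/*
 * Byte encoding emitted above (standard x86 encodings):
 *   x86-64: 48 bf <imm64>  =  movabs $val, %rdi
 *   x86-32: b8 <imm32>     =  mov    $val, %eax
 * Either way, the op pointer lands in the first-argument register of the
 * respective kernel calling convention (rdi on x86-64, eax with regparm(3)
 * on x86-32), ready for the call to optimized_callback().
 */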

asm (
			"optprobe_template_func:\n"
			".global optprobe_template_entry\n"
			"optprobe_template_entry:\n"
#ifdef CONFIG_X86_64
			/* We don't bother saving the ss register */
			"	pushq %rsp\n"
			"	pushfq\n"
			SAVE_REGS_STRING
			"	movq %rsp, %rsi\n"
			".global optprobe_template_val\n"
			"optprobe_template_val:\n"
			ASM_NOP5
			ASM_NOP5
			".global optprobe_template_call\n"
			"optprobe_template_call:\n"
			ASM_NOP5
			/* Move flags to rsp */
			"	movq 144(%rsp), %rdx\n"
			"	movq %rdx, 152(%rsp)\n"
			RESTORE_REGS_STRING
			/* Skip flags entry */
			"	addq $8, %rsp\n"
			"	popfq\n"
#else /* CONFIG_X86_32 */
			"	pushf\n"
			SAVE_REGS_STRING
			"	movl %esp, %edx\n"
			".global optprobe_template_val\n"
			"optprobe_template_val:\n"
			ASM_NOP5
			".global optprobe_template_call\n"
			"optprobe_template_call:\n"
			ASM_NOP5
			RESTORE_REGS_STRING
			"	addl $4, %esp\n"	/* skip cs */
			"	popf\n"
#endif
			".global optprobe_template_end\n"
			"optprobe_template_end:\n"
			".type optprobe_template_func, @function\n"
			".size optprobe_template_func, .-optprobe_template_func\n");

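/*
 * The template above is copied into each out-of-line buffer. The ASM_NOP5
 * padding is sized for the instructions patched in later by
 * arch_prepare_optimized_kprobe(): two 5-byte nops at optprobe_template_val
 * hold the 10-byte movabs of the op pointer on x86-64 (one 5-byte nop for
 * the 5-byte mov on x86-32), and the nop at optprobe_template_call holds
 * the 5-byte relative call to optimized_callback().
 */
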
142 void optprobe_template_func(void);
143 STACK_FRAME_NON_STANDARD(optprobe_template_func);
144 NOKPROBE_SYMBOL(optprobe_template_func);
145 NOKPROBE_SYMBOL(optprobe_template_entry);
146 NOKPROBE_SYMBOL(optprobe_template_val);
147 NOKPROBE_SYMBOL(optprobe_template_call);
148 NOKPROBE_SYMBOL(optprobe_template_end);
149 
150 #define TMPL_MOVE_IDX \
151 	((long)&optprobe_template_val - (long)&optprobe_template_entry)
152 #define TMPL_CALL_IDX \
153 	((long)&optprobe_template_call - (long)&optprobe_template_entry)
154 #define TMPL_END_IDX \
155 	((long)&optprobe_template_end - (long)&optprobe_template_entry)
156 
157 #define INT3_SIZE sizeof(kprobe_opcode_t)
158 
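/*
 * The TMPL_*_IDX macros are byte offsets of the patch points within the
 * template. INT3_SIZE is 1 byte (the int3 opcode), and RELATIVEJUMP_SIZE
 * is INT3_SIZE + RELATIVE_ADDR_SIZE = 5, the size of a "jmp rel32".
 */
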
/* Optimized kprobe callback function: called from the optinsn trampoline */
static void
optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long flags;

	/* This can happen if op is queued for delayed unoptimization */
	if (kprobe_disabled(&op->kp))
		return;

	local_irq_save(flags);
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		/* Save skipped registers */
#ifdef CONFIG_X86_64
		regs->cs = __KERNEL_CS;
#else
		regs->cs = __KERNEL_CS | get_kernel_rpl();
		regs->gs = 0;
#endif
		regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
		regs->orig_ax = ~0UL;

		__this_cpu_write(current_kprobe, &op->kp);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(optimized_callback);
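
/*
 * In optimized_callback() above, regs->ip is set to kp.addr + INT3_SIZE so
 * the pre-handler sees the same register state it would see on the int3
 * breakpoint path, and regs->orig_ax is ~0UL (-1), the usual "not in a
 * syscall" convention. Handlers thus cannot tell the two paths apart.
 */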

static int copy_optimized_instructions(u8 *dest, u8 *src)
{
	struct insn insn;
	int len = 0, ret;

	while (len < RELATIVEJUMP_SIZE) {
		ret = __copy_instruction(dest + len, src + len, &insn);
		if (!ret || !can_boost(&insn, src + len))
			return -EINVAL;
		len += ret;
	}
	/* Check whether the address range is reserved */
	if (ftrace_text_reserved(src, src + len - 1) ||
	    alternatives_text_reserved(src, src + len - 1) ||
	    jump_label_text_reserved(src, src + len - 1))
		return -EBUSY;

	return len;
}

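/*
 * Note that copy_optimized_instructions() copies whole instructions until
 * at least RELATIVEJUMP_SIZE (5) bytes are covered: the 5-byte jump may end
 * in the middle of an instruction, and a partially copied instruction could
 * not be executed from the out-of-line buffer.
 */
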
/* Check whether insn is an indirect jump */
static int __insn_is_indirect_jump(struct insn *insn)
{
	return ((insn->opcode.bytes[0] == 0xff &&
		(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
		insn->opcode.bytes[0] == 0xea);	/* Segment based jump */
}
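
/*
 * The (reg & 6) == 4 test above matches ModRM reg fields 4 (100b) and
 * 5 (101b), i.e. both "ff /4" (jmp near, indirect) and "ff /5" (jmp far,
 * indirect), since masking with 6 (110b) maps both values to 4.
 */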

/* Check whether insn jumps into the specified address range */
static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
{
	unsigned long target = 0;

	switch (insn->opcode.bytes[0]) {
	case 0xe0:	/* loopne */
	case 0xe1:	/* loope */
	case 0xe2:	/* loop */
	case 0xe3:	/* jcxz */
	case 0xe9:	/* near relative jump */
	case 0xeb:	/* short relative jump */
		break;
	case 0x0f:
		if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
			break;
		return 0;
	default:
		if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
			break;
		return 0;
	}
	target = (unsigned long)insn->next_byte + insn->immediate.value;

	return (start <= target && target <= start + len);
}

static int insn_is_indirect_jump(struct insn *insn)
{
	int ret = __insn_is_indirect_jump(insn);

#ifdef CONFIG_RETPOLINE
	/*
	 * A jump to an __x86_indirect_thunk_* retpoline is treated as an
	 * indirect jump. Note that even with CONFIG_RETPOLINE=y, a kernel
	 * built with an older gcc may still emit bare indirect jumps, so
	 * this check is added on top of, not instead of, the plain
	 * indirect-jump check.
	 */
	if (!ret)
		ret = insn_jump_into_range(insn,
				(unsigned long)__indirect_thunk_start,
				(unsigned long)__indirect_thunk_end -
				(unsigned long)__indirect_thunk_start);
#endif
	return ret;
}

/* Decode the whole function to ensure no instruction jumps into the target */
static int can_optimize(unsigned long paddr)
{
	unsigned long addr, size = 0, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	/* Lookup symbol including addr */
	if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
		return 0;

	/*
	 * Do not optimize in the entry code due to the unstable
	 * stack handling and registers setup.
	 */
	if (((paddr >= (unsigned long)__entry_text_start) &&
	     (paddr <  (unsigned long)__entry_text_end)) ||
	    ((paddr >= (unsigned long)__irqentry_text_start) &&
	     (paddr <  (unsigned long)__irqentry_text_end)))
		return 0;

	/* Check there is enough space for a relative jump. */
	if (size - offset < RELATIVEJUMP_SIZE)
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr - offset + size) { /* Decode until function end */
		unsigned long recovered_insn;
		if (search_exception_tables(addr))
			/*
			 * Since some fixup code will jump into this function,
			 * we can't optimize a kprobe in this function.
			 */
			return 0;
		recovered_insn = recover_probed_instruction(buf, addr);
		if (!recovered_insn)
			return 0;
		kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
		insn_get_length(&insn);
		/* Another subsystem has put a breakpoint here */
		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
			return 0;
		/* Recover address */
		insn.kaddr = (void *)addr;
		insn.next_byte = (void *)(addr + insn.length);
		/* Check that no instruction jumps into the target range */
		if (insn_is_indirect_jump(&insn) ||
		    insn_jump_into_range(&insn, paddr + INT3_SIZE,
					 RELATIVE_ADDR_SIZE))
			return 0;
		addr += insn.length;
	}

	return 1;
}

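/*
 * The range checked above, RELATIVE_ADDR_SIZE bytes from paddr + INT3_SIZE,
 * covers the four displacement bytes of the future 5-byte jump. If any
 * branch in the function could land inside them, that CPU would decode the
 * middle of the patched jump as an instruction, so optimization is refused.
 */
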
/* Check whether the optimized_kprobe can actually be optimized. */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	int i;
	struct kprobe *p;

	for (i = 1; i < op->optinsn.size; i++) {
		p = get_kprobe(op->kp.addr + i);
		if (p && !kprobe_disabled(p))
			return -EEXIST;
	}

	return 0;
}

/* Check whether addr falls within the optimized instructions. */
int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 unsigned long addr)
{
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + op->optinsn.size > addr);
}

/* Free optimized instruction slot */
static
void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
	if (op->optinsn.insn) {
		free_optinsn_slot(op->optinsn.insn, dirty);
		op->optinsn.insn = NULL;
		op->optinsn.size = 0;
	}
}

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	__arch_remove_optimized_kprobe(op, 1);
}

/*
 * Copy the instructions that will be replaced at the target.
 * The target instructions MUST be relocatable (checked inside).
 * This is called when a new aggr(opt)probe is allocated or reused.
 */
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
				  struct kprobe *__unused)
{
	u8 *buf;
	int ret;
	long rel;

	if (!can_optimize((unsigned long)op->kp.addr))
		return -EILSEQ;

	op->optinsn.insn = get_optinsn_slot();
	if (!op->optinsn.insn)
		return -ENOMEM;

	/*
	 * Verify that the address gap is within the +/-2GB range reachable
	 * by a relative jump.
	 */
	rel = (long)op->optinsn.insn - ((long)op->kp.addr + RELATIVEJUMP_SIZE);
	if (abs(rel) > 0x7fffffff) {
		__arch_remove_optimized_kprobe(op, 0);
		return -ERANGE;
	}

	buf = (u8 *)op->optinsn.insn;
	set_memory_rw((unsigned long)buf & PAGE_MASK, 1);

	/* Copy instructions into the out-of-line buffer */
	ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr);
	if (ret < 0) {
		__arch_remove_optimized_kprobe(op, 0);
		return ret;
	}
	op->optinsn.size = ret;

	/* Copy the arch-dependent instance from the template */
	memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);

	/* Set probe information */
	synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);

	/* Set probe function call */
	synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);

	/* Set the returning jmp instruction at the tail of the out-of-line buffer */
	synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
			   (u8 *)op->kp.addr + op->optinsn.size);

	set_memory_ro((unsigned long)buf & PAGE_MASK, 1);

	flush_icache_range((unsigned long) buf,
			   (unsigned long) buf + TMPL_END_IDX +
			   op->optinsn.size + RELATIVEJUMP_SIZE);
	return 0;
}

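/*
 * Resulting out-of-line buffer layout, as built by the function above:
 *
 *   +------------------------------+ <- op->optinsn.insn
 *   | template (save regs,         |
 *   |   mov $op, %rdi/%eax,        |    patched at TMPL_MOVE_IDX
 *   |   call optimized_callback,   |    patched at TMPL_CALL_IDX
 *   |   restore regs)              |
 *   +------------------------------+ <- op->optinsn.insn + TMPL_END_IDX
 *   | copied original instructions |    op->optinsn.size bytes
 *   +------------------------------+
 *   | jmp back to kp.addr + size   |    RELATIVEJUMP_SIZE bytes
 *   +------------------------------+
 */
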
/*
 * Replace breakpoints (int3) with relative jumps.
 * Caller must hold kprobe_mutex and text_mutex.
 */
void arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op, *tmp;
	u8 insn_buf[RELATIVEJUMP_SIZE];

	list_for_each_entry_safe(op, tmp, oplist, list) {
		s32 rel = (s32)((long)op->optinsn.insn -
			((long)op->kp.addr + RELATIVEJUMP_SIZE));

		WARN_ON(kprobe_disabled(&op->kp));

		/* Back up the instructions that will be overwritten by the jump displacement */
		memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
		       RELATIVE_ADDR_SIZE);

		insn_buf[0] = RELATIVEJUMP_OPCODE;
		*(s32 *)(&insn_buf[1]) = rel;

		text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE,
			     op->optinsn.insn);

		list_del_init(&op->list);
	}
}

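/*
 * text_poke_bp() patches the 5 bytes using the int3 breakpoint protocol:
 * the first byte becomes a transient int3 while the remaining bytes are
 * written, and any CPU that hits that int3 meanwhile is redirected to the
 * handler address passed as the last argument - here op->optinsn.insn, the
 * trampoline - so execution stays correct throughout the transition.
 */
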
/* Replace a relative jump with a breakpoint (int3).  */
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	u8 insn_buf[RELATIVEJUMP_SIZE];

	/* Set int3 to first byte for kprobes */
	insn_buf[0] = BREAKPOINT_INSTRUCTION;
	memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
	text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE,
		     op->optinsn.insn);
}

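/*
 * Note that unoptimizing restores the int3-probed form (int3 followed by
 * the four backed-up original bytes), not the pristine instruction; the
 * int3 itself is removed later, if at all, by the generic kprobes disarm
 * path.
 */
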
/*
 * Recover original instructions and breakpoints from relative jumps.
 * Caller must hold kprobe_mutex.
 */
extern void arch_unoptimize_kprobes(struct list_head *oplist,
				    struct list_head *done_list)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
{
	struct optimized_kprobe *op;

	if (p->flags & KPROBE_FLAG_OPTIMIZED) {
		/* This kprobe can really run the optimized path. */
		op = container_of(p, struct optimized_kprobe, kp);
		/* Detour through the copied instructions */
		regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
		if (!reenter)
			reset_current_kprobe();
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}
NOKPROBE_SYMBOL(setup_detour_execution);