/*
 * Kernel Probes Jump Optimization (Optprobes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Hitachi Ltd., 2012
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>

#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/nospec-branch.h>

#include "common.h"

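/*
 * Recover the original instruction bytes at addr for further decoding.
 * If addr falls inside a jump-optimized region, the bytes following the
 * int3 have been overwritten by the relative-jump displacement, so they
 * must be taken from op->optinsn.copied_insn rather than from the text.
 */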
unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct optimized_kprobe *op;
	struct kprobe *kp;
	long offs;
	int i;

	for (i = 0; i < RELATIVEJUMP_SIZE; i++) {
		kp = get_kprobe((void *)addr - i);
		/* This function only handles jump-optimized kprobes */
		if (kp && kprobe_optimized(kp)) {
			op = container_of(kp, struct optimized_kprobe, kp);
			/* If op->list is not empty, op is still being optimized */
			if (list_empty(&op->list))
				goto found;
		}
	}

	return addr;
found:
	/*
	 * If the kprobe is jump-optimized, the original bytes may have been
	 * overwritten by the jump destination address. In that case, the
	 * original bytes must be recovered from the op->optinsn.copied_insn
	 * buffer.
	 */
	memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	if (addr == (unsigned long)kp->addr) {
		buf[0] = kp->opcode;
		memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
	} else {
		offs = addr - (unsigned long)kp->addr - 1;
		memcpy(buf, op->optinsn.copied_insn + offs, RELATIVE_ADDR_SIZE - offs);
	}

	return (unsigned long)buf;
}

/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
{
#ifdef CONFIG_X86_64
	/* movabs $val, %rdi */
	*addr++ = 0x48;
	*addr++ = 0xbf;
#else
	/* movl $val, %eax */
	*addr++ = 0xb8;
#endif
	*(unsigned long *)addr = val;
}

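/*
 * The out-of-line template below is copied to the head of each optinsn
 * slot. The optprobe_template_val and optprobe_template_call labels mark
 * NOP padding that arch_prepare_optimized_kprobe() later overwrites with
 * "set arg1 = op" and "call optimized_callback" respectively; the copied
 * original instructions are placed right after optprobe_template_end.
 */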
asm (
	".global optprobe_template_entry\n"
	"optprobe_template_entry:\n"
#ifdef CONFIG_X86_64
	/* We don't bother saving the ss register */
	"	pushq %rsp\n"
	"	pushfq\n"
	SAVE_REGS_STRING
	"	movq %rsp, %rsi\n"
	".global optprobe_template_val\n"
	"optprobe_template_val:\n"
	ASM_NOP5
	ASM_NOP5
	".global optprobe_template_call\n"
	"optprobe_template_call:\n"
	ASM_NOP5
	/* Move flags to rsp */
	"	movq 144(%rsp), %rdx\n"
	"	movq %rdx, 152(%rsp)\n"
	RESTORE_REGS_STRING
	/* Skip flags entry */
	"	addq $8, %rsp\n"
	"	popfq\n"
#else /* CONFIG_X86_32 */
	"	pushf\n"
	SAVE_REGS_STRING
	"	movl %esp, %edx\n"
	".global optprobe_template_val\n"
	"optprobe_template_val:\n"
	ASM_NOP5
	".global optprobe_template_call\n"
	"optprobe_template_call:\n"
	ASM_NOP5
	RESTORE_REGS_STRING
	"	addl $4, %esp\n"	/* skip cs */
	"	popf\n"
#endif
	".global optprobe_template_end\n"
	"optprobe_template_end:\n");

#define TMPL_MOVE_IDX \
	((long)&optprobe_template_val - (long)&optprobe_template_entry)
#define TMPL_CALL_IDX \
	((long)&optprobe_template_call - (long)&optprobe_template_entry)
#define TMPL_END_IDX \
	((long)&optprobe_template_end - (long)&optprobe_template_entry)

#define INT3_SIZE sizeof(kprobe_opcode_t)

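/*
 * optimized_callback() runs out of line, reached through the template
 * above. The template has already saved the registers on the stack; here
 * the saved frame is fixed up (cs, ip, orig_ax) so that, from the
 * pre-handler's point of view, it looks like an int3 breakpoint hit.
 */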
/* Optimized kprobe callback function: called from optinsn */
static void
optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long flags;

	/* This is possible if op is under delayed unoptimization */
	if (kprobe_disabled(&op->kp))
		return;

	local_irq_save(flags);
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		/* Save skipped registers */
#ifdef CONFIG_X86_64
		regs->cs = __KERNEL_CS;
#else
		regs->cs = __KERNEL_CS | get_kernel_rpl();
		regs->gs = 0;
#endif
		regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
		regs->orig_ax = ~0UL;

		__this_cpu_write(current_kprobe, &op->kp);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(optimized_callback);

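/*
 * Copy whole instructions from the probed address until at least
 * RELATIVEJUMP_SIZE bytes are covered, so that the relative jump never
 * cuts an instruction in half. Every copied instruction must also be
 * "boostable", i.e. safe to execute out of line from the detour buffer.
 */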
static int copy_optimized_instructions(u8 *dest, u8 *src)
{
	int len = 0, ret;

	while (len < RELATIVEJUMP_SIZE) {
		ret = __copy_instruction(dest + len, src + len);
		if (!ret || !can_boost(dest + len, src + len))
			return -EINVAL;
		len += ret;
	}
	/* Check whether the address range is reserved */
	if (ftrace_text_reserved(src, src + len - 1) ||
	    alternatives_text_reserved(src, src + len - 1) ||
	    jump_label_text_reserved(src, src + len - 1))
		return -EBUSY;

	return len;
}

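/*
 * Indirect jumps (opcode 0xff with ModRM /4 or /5, or the far jump 0xea)
 * cannot be analysed statically: their target might land inside the bytes
 * that the optimized probe overwrites, so any function containing one is
 * rejected by can_optimize().
 */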
/* Check whether insn is indirect jump */
static int __insn_is_indirect_jump(struct insn *insn)
{
	return ((insn->opcode.bytes[0] == 0xff &&
		(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
		insn->opcode.bytes[0] == 0xea);	/* Segment based jump */
}

/* Check whether insn jumps into specified address range */
static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
{
	unsigned long target = 0;

	switch (insn->opcode.bytes[0]) {
	case 0xe0:	/* loopne */
	case 0xe1:	/* loope */
	case 0xe2:	/* loop */
	case 0xe3:	/* jcxz */
	case 0xe9:	/* near relative jump */
	case 0xeb:	/* short relative jump */
		break;
	case 0x0f:
		if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
			break;
		return 0;
	default:
		if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
			break;
		return 0;
	}
	target = (unsigned long)insn->next_byte + insn->immediate.value;

	return (start <= target && target <= start + len);
}

static int insn_is_indirect_jump(struct insn *insn)
{
	int ret = __insn_is_indirect_jump(insn);

#ifdef CONFIG_RETPOLINE
	/*
	 * A jump to x86_indirect_thunk_* is treated as an indirect jump.
	 * Note that even with CONFIG_RETPOLINE=y, a kernel compiled with
	 * an older gcc may still use indirect jumps, so this check is
	 * added on top of the indirect-jump check rather than replacing it.
	 */
	if (!ret)
		ret = insn_jump_into_range(insn,
				(unsigned long)__indirect_thunk_start,
				(unsigned long)__indirect_thunk_end -
				(unsigned long)__indirect_thunk_start);
#endif
	return ret;
}

/* Decode the whole function to ensure no instruction jumps into the target */
static int can_optimize(unsigned long paddr)
{
	unsigned long addr, size = 0, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	/* Lookup symbol including addr */
	if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
		return 0;

	/*
	 * Do not optimize in the entry code due to the unstable
	 * stack handling.
	 */
	if ((paddr >= (unsigned long)__entry_text_start) &&
	    (paddr < (unsigned long)__entry_text_end))
		return 0;

	/* Check there is enough space for a relative jump. */
	if (size - offset < RELATIVEJUMP_SIZE)
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr - offset + size) { /* Decode until function end */
		unsigned long recovered_insn;
		if (search_exception_tables(addr))
			/*
			 * Since some fixup code will jump into this function,
			 * we can't optimize kprobes in this function.
			 */
			return 0;
		recovered_insn = recover_probed_instruction(buf, addr);
		if (!recovered_insn)
			return 0;
		kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
		insn_get_length(&insn);
		/* Another subsystem puts a breakpoint */
		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
			return 0;
		/* Recover address */
		insn.kaddr = (void *)addr;
		insn.next_byte = (void *)(addr + insn.length);
		/* Check that no instruction jumps into the target range */
		if (insn_is_indirect_jump(&insn) ||
		    insn_jump_into_range(&insn, paddr + INT3_SIZE,
					 RELATIVE_ADDR_SIZE))
			return 0;
		addr += insn.length;
	}

	return 1;
}

/* Check whether optimized_kprobe can actually be optimized. */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	int i;
	struct kprobe *p;

	for (i = 1; i < op->optinsn.size; i++) {
		p = get_kprobe(op->kp.addr + i);
		if (p && !kprobe_disabled(p))
			return -EEXIST;
	}

	return 0;
}

/* Check whether addr is within the optimized instructions. */
int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 unsigned long addr)
{
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + op->optinsn.size > addr);
}

/* Free optimized instruction slot */
static
void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
	if (op->optinsn.insn) {
		free_optinsn_slot(op->optinsn.insn, dirty);
		op->optinsn.insn = NULL;
		op->optinsn.size = 0;
	}
}

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	__arch_remove_optimized_kprobe(op, 1);
}

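/*
 * Rough layout of the out-of-line buffer built below:
 *
 *	[ template: save regs, set arg1 = op, call optimized_callback ]
 *	[ copied original instructions (>= RELATIVEJUMP_SIZE bytes)   ]
 *	[ relative jump back to op->kp.addr + op->optinsn.size        ]
 */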
/*
 * Copy the instructions that the relative jump will replace.
 * The target instructions MUST be relocatable (checked inside).
 * This is called when a new aggr(opt)probe is allocated or reused.
 */
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
				  struct kprobe *__unused)
{
	u8 *buf;
	int ret;
	long rel;

	if (!can_optimize((unsigned long)op->kp.addr))
		return -EILSEQ;

	op->optinsn.insn = get_optinsn_slot();
	if (!op->optinsn.insn)
		return -ENOMEM;

	/*
	 * Verify if the address gap is in 2GB range, because this uses
	 * a relative jump.
	 */
	rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
	if (abs(rel) > 0x7fffffff) {
		__arch_remove_optimized_kprobe(op, 0);
		return -ERANGE;
	}

	buf = (u8 *)op->optinsn.insn;
	set_memory_rw((unsigned long)buf & PAGE_MASK, 1);

	/* Copy instructions into the out-of-line buffer */
	ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr);
	if (ret < 0) {
		__arch_remove_optimized_kprobe(op, 0);
		return ret;
	}
	op->optinsn.size = ret;

	/* Copy arch-dep-instance from template */
	memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);

	/* Set probe information */
	synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);

	/* Set probe function call */
	synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);

	/* Set returning jmp instruction at the tail of out-of-line buffer */
	synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
			   (u8 *)op->kp.addr + op->optinsn.size);

	set_memory_ro((unsigned long)buf & PAGE_MASK, 1);

	flush_icache_range((unsigned long)buf,
			   (unsigned long)buf + TMPL_END_IDX +
			   op->optinsn.size + RELATIVEJUMP_SIZE);
	return 0;
}

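/*
 * text_poke_bp() first plants an int3 at the probe address and only then
 * writes the remaining jump bytes; a CPU that hits the transient int3 is
 * redirected to op->optinsn.insn (the fourth argument), so execution stays
 * safe while the 5-byte jump is being installed.
 */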
/*
 * Replace breakpoints (int3) with relative jumps.
 * Caller must hold kprobe_mutex and text_mutex.
 */
void arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op, *tmp;
	u8 insn_buf[RELATIVEJUMP_SIZE];

	list_for_each_entry_safe(op, tmp, oplist, list) {
		s32 rel = (s32)((long)op->optinsn.insn -
			((long)op->kp.addr + RELATIVEJUMP_SIZE));

		WARN_ON(kprobe_disabled(&op->kp));

		/* Back up instructions which will be replaced by jump address */
		memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
		       RELATIVE_ADDR_SIZE);

		insn_buf[0] = RELATIVEJUMP_OPCODE;
		*(s32 *)(&insn_buf[1]) = rel;

		text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE,
			     op->optinsn.insn);

		list_del_init(&op->list);
	}
}

/* Replace a relative jump with a breakpoint (int3). */
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	u8 insn_buf[RELATIVEJUMP_SIZE];

	/* Set int3 to first byte for kprobes */
	insn_buf[0] = BREAKPOINT_INSTRUCTION;
	memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
	text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE,
		     op->optinsn.insn);
}

/*
 * Recover original instructions and breakpoints from relative jumps.
 * Caller must hold kprobe_mutex.
 */
extern void arch_unoptimize_kprobes(struct list_head *oplist,
				    struct list_head *done_list)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

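/*
 * Called from the kprobe int3 handler: if the hit kprobe is optimized,
 * continue execution in the out-of-line buffer instead of single-stepping
 * the original instruction.
 */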
int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
{
	struct optimized_kprobe *op;

	if (p->flags & KPROBE_FLAG_OPTIMIZED) {
		/* This kprobe is really able to run optimized path. */
		op = container_of(p, struct optimized_kprobe, kp);
		/* Detour through copied instructions */
		regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
		if (!reenter)
			reset_current_kprobe();
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}
NOKPROBE_SYMBOL(setup_detour_execution);
