// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Kernel Probes Jump Optimization (Optprobes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Hitachi Ltd., 2012
 */
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/objtool.h>
#include <linux/pgtable.h>
#include <linux/static_call.h>

#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/nospec-branch.h>

#include "common.h"

unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct optimized_kprobe *op;
	struct kprobe *kp;
	long offs;
	int i;

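	/*
	 * Scan back up to JMP32_INSN_SIZE - 1 bytes: an optimized kprobe
	 * installed there may have its jump instruction covering @addr.
	 */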
	for (i = 0; i < JMP32_INSN_SIZE; i++) {
		kp = get_kprobe((void *)addr - i);
		/* This function only handles jump-optimized kprobes */
		if (kp && kprobe_optimized(kp)) {
			op = container_of(kp, struct optimized_kprobe, kp);
			/* If op->list is not empty, op is being optimized */
			if (list_empty(&op->list))
				goto found;
		}
	}

	return addr;
found:
	/*
	 * If the kprobe is optimized, the original bytes may have been
	 * overwritten by the jump destination address. In that case, the
	 * original bytes must be recovered from the op->optinsn.copied_insn
	 * buffer.
	 */
	if (copy_from_kernel_nofault(buf, (void *)addr,
			MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
		return 0UL;

	if (addr == (unsigned long)kp->addr) {
		buf[0] = kp->opcode;
		memcpy(buf + 1, op->optinsn.copied_insn, DISP32_SIZE);
	} else {
		offs = addr - (unsigned long)kp->addr - 1;
		memcpy(buf, op->optinsn.copied_insn + offs, DISP32_SIZE - offs);
	}

	return (unsigned long)buf;
}

static void synthesize_clac(kprobe_opcode_t *addr)
{
	/*
	 * Can't be static_cpu_has() due to how objtool treats this feature bit.
	 * This isn't a fast path anyway.
	 */
	if (!boot_cpu_has(X86_FEATURE_SMAP))
		return;

	/* Replace the NOP3 with CLAC */
	addr[0] = 0x0f;
	addr[1] = 0x01;
	addr[2] = 0xca;
}

/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
{
#ifdef CONFIG_X86_64
	/* movabs $val, %rdi */
	*addr++ = 0x48;
	*addr++ = 0xbf;
#else
	/* movl $val, %eax */
	*addr++ = 0xb8;
#endif
	*(unsigned long *)addr = val;
}

asm (
	".pushsection .rodata\n"
	"optprobe_template_func:\n"
	".global optprobe_template_entry\n"
	"optprobe_template_entry:\n"
#ifdef CONFIG_X86_64
	/* We don't bother saving the ss register */
	"	pushq %rsp\n"
	"	pushfq\n"
	".global optprobe_template_clac\n"
	"optprobe_template_clac:\n"
	ASM_NOP3
	SAVE_REGS_STRING
	"	movq %rsp, %rsi\n"
	".global optprobe_template_val\n"
	"optprobe_template_val:\n"
	ASM_NOP5
	ASM_NOP5
	".global optprobe_template_call\n"
	"optprobe_template_call:\n"
	ASM_NOP5
	/* Move flags to rsp */
	"	movq 18*8(%rsp), %rdx\n"
	"	movq %rdx, 19*8(%rsp)\n"
	RESTORE_REGS_STRING
	/* Skip flags entry */
	"	addq $8, %rsp\n"
	"	popfq\n"
#else /* CONFIG_X86_32 */
	"	pushl %esp\n"
	"	pushfl\n"
	".global optprobe_template_clac\n"
	"optprobe_template_clac:\n"
	ASM_NOP3
	SAVE_REGS_STRING
	"	movl %esp, %edx\n"
	".global optprobe_template_val\n"
	"optprobe_template_val:\n"
	ASM_NOP5
	".global optprobe_template_call\n"
	"optprobe_template_call:\n"
	ASM_NOP5
	/* Move flags into esp */
	"	movl 14*4(%esp), %edx\n"
	"	movl %edx, 15*4(%esp)\n"
	RESTORE_REGS_STRING
	/* Skip flags entry */
	"	addl $4, %esp\n"
	"	popfl\n"
#endif
	".global optprobe_template_end\n"
	"optprobe_template_end:\n"
	".popsection\n");

void optprobe_template_func(void);
STACK_FRAME_NON_STANDARD(optprobe_template_func);

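/*
 * Byte offsets of the patchable slots in the template above: the CLAC,
 * the "set arg1 to &op" move, the call to optimized_callback(), and the
 * end of the template where the copied instructions are appended.
 */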
#define TMPL_CLAC_IDX \
	((long)optprobe_template_clac - (long)optprobe_template_entry)
#define TMPL_MOVE_IDX \
	((long)optprobe_template_val - (long)optprobe_template_entry)
#define TMPL_CALL_IDX \
	((long)optprobe_template_call - (long)optprobe_template_entry)
#define TMPL_END_IDX \
	((long)optprobe_template_end - (long)optprobe_template_entry)

/* Optimized kprobe callback function: called from optinsn */
static void
optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{
	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	preempt_disable();
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
		/* Save skipped registers */
		regs->cs = __KERNEL_CS;
#ifdef CONFIG_X86_32
		regs->gs = 0;
#endif
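		/*
		 * Set up regs as if an INT3 at the probe address had just
		 * trapped, so the pre-handler sees the same state as with a
		 * regular (breakpoint-based) kprobe.
		 */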
		regs->ip = (unsigned long)op->kp.addr + INT3_INSN_SIZE;
		regs->orig_ax = ~0UL;

		__this_cpu_write(current_kprobe, &op->kp);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}
	preempt_enable();
}
NOKPROBE_SYMBOL(optimized_callback);

static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
{
	struct insn insn;
	int len = 0, ret;

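	/*
	 * Copy whole instructions one at a time until at least JMP32_INSN_SIZE
	 * bytes are covered; every copied instruction must be boostable so it
	 * can run from the out-of-line buffer.
	 */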
	while (len < JMP32_INSN_SIZE) {
		ret = __copy_instruction(dest + len, src + len, real + len, &insn);
		if (!ret || !can_boost(&insn, src + len))
			return -EINVAL;
		len += ret;
	}
	/* Check whether the address range is reserved */
	if (ftrace_text_reserved(src, src + len - 1) ||
	    alternatives_text_reserved(src, src + len - 1) ||
	    jump_label_text_reserved(src, src + len - 1) ||
	    static_call_text_reserved(src, src + len - 1))
		return -EBUSY;

	return len;
}

/* Check whether insn is indirect jump */
static int __insn_is_indirect_jump(struct insn *insn)
{
	return ((insn->opcode.bytes[0] == 0xff &&
		(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
		insn->opcode.bytes[0] == 0xea);	/* Segment based jump */
}

/* Check whether insn jumps into specified address range */
static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
{
	unsigned long target = 0;

	switch (insn->opcode.bytes[0]) {
	case 0xe0:	/* loopne */
	case 0xe1:	/* loope */
	case 0xe2:	/* loop */
	case 0xe3:	/* jcxz */
	case 0xe9:	/* near relative jump */
	case 0xeb:	/* short relative jump */
		break;
	case 0x0f:
		if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
			break;
		return 0;
	default:
		if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
			break;
		return 0;
	}
	target = (unsigned long)insn->next_byte + insn->immediate.value;

	return (start <= target && target <= start + len);
}

static int insn_is_indirect_jump(struct insn *insn)
{
	int ret = __insn_is_indirect_jump(insn);

#ifdef CONFIG_RETPOLINE
	/*
	 * Jumps to the x86_indirect_thunk_* functions are treated as indirect
	 * jumps. Note that even with CONFIG_RETPOLINE=y, a kernel compiled
	 * with an older gcc may still emit plain indirect jumps, so this check
	 * is in addition to, not a replacement for, the check above.
	 */
	if (!ret)
		ret = insn_jump_into_range(insn,
				(unsigned long)__indirect_thunk_start,
				(unsigned long)__indirect_thunk_end -
				(unsigned long)__indirect_thunk_start);
#endif
	return ret;
}

/* Decode the whole function to ensure no instruction jumps into the target range */
static int can_optimize(unsigned long paddr)
{
	unsigned long addr, size = 0, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	/* Lookup symbol including addr */
	if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
		return 0;

	/*
	 * Do not optimize in the entry code due to the unstable
	 * stack handling and registers setup.
	 */
	if (((paddr >= (unsigned long)__entry_text_start) &&
	     (paddr < (unsigned long)__entry_text_end)))
		return 0;

	/* Check there is enough space for a relative jump. */
	if (size - offset < JMP32_INSN_SIZE)
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr - offset + size) { /* Decode until function end */
		unsigned long recovered_insn;
		int ret;

		if (search_exception_tables(addr))
			/*
			 * Since some fixup code may jump into this function,
			 * we can't optimize a kprobe inside it.
			 */
			return 0;
		recovered_insn = recover_probed_instruction(buf, addr);
		if (!recovered_insn)
			return 0;

		ret = insn_decode(&insn, (void *)recovered_insn, MAX_INSN_SIZE, INSN_MODE_KERN);
		if (ret < 0)
			return 0;
#ifdef CONFIG_KGDB
		/*
		 * If there is a dynamically installed kgdb sw breakpoint,
		 * this function should not be probed.
		 */
		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE &&
		    kgdb_has_hit_break(addr))
			return 0;
#endif
		/* Recover address */
		insn.kaddr = (void *)addr;
		insn.next_byte = (void *)(addr + insn.length);
		/* Check that this instruction doesn't jump into the target range */
		if (insn_is_indirect_jump(&insn) ||
		    insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE,
					 DISP32_SIZE))
			return 0;
		addr += insn.length;
	}

	return 1;
}

/* Check optimized_kprobe can actually be optimized. */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	int i;
	struct kprobe *p;

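	/*
	 * The bytes that will be covered by the optimized jump must not
	 * contain another active kprobe; only the probe at offset 0 is
	 * allowed.
	 */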
	for (i = 1; i < op->optinsn.size; i++) {
		p = get_kprobe(op->kp.addr + i);
		if (p && !kprobe_disabled(p))
			return -EEXIST;
	}

	return 0;
}

/* Check whether the addr is within the optimized instructions. */
int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 unsigned long addr)
{
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + op->optinsn.size > addr);
}

/* Free optimized instruction slot */
static
void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
	u8 *slot = op->optinsn.insn;
	if (slot) {
		int len = TMPL_END_IDX + op->optinsn.size + JMP32_INSN_SIZE;

		/* Record the perf event before freeing the slot */
		if (dirty)
			perf_event_text_poke(slot, slot, len, NULL, 0);

		free_optinsn_slot(slot, dirty);
		op->optinsn.insn = NULL;
		op->optinsn.size = 0;
	}
}

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	__arch_remove_optimized_kprobe(op, 1);
}

/*
 * Copy the instructions that will be replaced by the jump.
 * The target instructions MUST be relocatable (checked inside).
 * This is called when a new aggr(opt)probe is allocated or reused.
 */
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
				  struct kprobe *__unused)
{
	u8 *buf = NULL, *slot;
	int ret, len;
	long rel;

	if (!can_optimize((unsigned long)op->kp.addr))
		return -EILSEQ;

	buf = kzalloc(MAX_OPTINSN_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	op->optinsn.insn = slot = get_optinsn_slot();
	if (!slot) {
		ret = -ENOMEM;
		goto out;
	}
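	/*
	 * Detour buffer layout:
	 *   [ template: save regs, set arg1, call optimized_callback() ]
	 *   [ copy of the displaced original instructions              ]
	 *   [ jump back to the insn following the displaced ones       ]
	 */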

	/*
	 * Verify that the address gap is within the ±2GB range reachable
	 * by a relative jump.
	 */
	rel = (long)slot - (long)op->kp.addr + JMP32_INSN_SIZE;
	if (abs(rel) > 0x7fffffff) {
		ret = -ERANGE;
		goto err;
	}

	/* Copy arch-dep-instance from template */
	memcpy(buf, optprobe_template_entry, TMPL_END_IDX);

	/* Copy instructions into the out-of-line buffer */
	ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr,
					  slot + TMPL_END_IDX);
	if (ret < 0)
		goto err;
	op->optinsn.size = ret;
	len = TMPL_END_IDX + op->optinsn.size;

	synthesize_clac(buf + TMPL_CLAC_IDX);

	/* Set probe information */
	synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);

	/* Set probe function call */
	synthesize_relcall(buf + TMPL_CALL_IDX,
			   slot + TMPL_CALL_IDX, optimized_callback);

	/* Set returning jmp instruction at the tail of out-of-line buffer */
	synthesize_reljump(buf + len, slot + len,
			   (u8 *)op->kp.addr + op->optinsn.size);
	len += JMP32_INSN_SIZE;

	/*
	 * Note len = TMPL_END_IDX + op->optinsn.size + JMP32_INSN_SIZE is also
	 * used in __arch_remove_optimized_kprobe().
	 */

	/* We have to use text_poke() for instruction buffer because it is RO */
	perf_event_text_poke(slot, NULL, 0, buf, len);
	text_poke(slot, buf, len);

	ret = 0;
out:
	kfree(buf);
	return ret;

err:
	__arch_remove_optimized_kprobe(op, 0);
	goto out;
}

/*
 * Replace breakpoints (INT3) with relative jumps (JMP.d32).
 * Caller must hold kprobe_mutex and text_mutex.
 *
 * The caller will have installed a regular kprobe and, after that, issued
 * synchronize_rcu_tasks(); this ensures that the instruction(s) that live in
 * the 4 bytes after the INT3 are unused and can now be overwritten.
 */
void arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op, *tmp;
	u8 insn_buff[JMP32_INSN_SIZE];

	list_for_each_entry_safe(op, tmp, oplist, list) {
		s32 rel = (s32)((long)op->optinsn.insn -
			((long)op->kp.addr + JMP32_INSN_SIZE));

		WARN_ON(kprobe_disabled(&op->kp));

		/* Backup instructions which will be replaced by jump address */
		memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_INSN_SIZE,
		       DISP32_SIZE);

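		/*
		 * Build the JMP.d32 to the detour buffer and install it with
		 * text_poke_bp(), which uses an INT3 breakpoint so other CPUs
		 * never see a half-written jump.
		 */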
		insn_buff[0] = JMP32_INSN_OPCODE;
		*(s32 *)(&insn_buff[1]) = rel;

		text_poke_bp(op->kp.addr, insn_buff, JMP32_INSN_SIZE, NULL);

		list_del_init(&op->list);
	}
}

/*
 * Replace a relative jump (JMP.d32) with a breakpoint (INT3).
 *
 * After that, we can restore the 4 bytes after the INT3 to undo what
 * arch_optimize_kprobes() scribbled. This is safe since those bytes will be
 * unused once the INT3 lands.
 */
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	u8 new[JMP32_INSN_SIZE] = { INT3_INSN_OPCODE, };
	u8 old[JMP32_INSN_SIZE];
	u8 *addr = op->kp.addr;

	memcpy(old, op->kp.addr, JMP32_INSN_SIZE);
	memcpy(new + INT3_INSN_SIZE,
	       op->optinsn.copied_insn,
	       JMP32_INSN_SIZE - INT3_INSN_SIZE);

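	/*
	 * First punch in the INT3 over the JMP opcode and sync all CPUs;
	 * after that the four displacement bytes are dead and the original
	 * instruction bytes can be restored behind the breakpoint.
	 */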
	text_poke(addr, new, INT3_INSN_SIZE);
	text_poke_sync();
	text_poke(addr + INT3_INSN_SIZE,
		  new + INT3_INSN_SIZE,
		  JMP32_INSN_SIZE - INT3_INSN_SIZE);
	text_poke_sync();

	perf_event_text_poke(op->kp.addr, old, JMP32_INSN_SIZE, new, JMP32_INSN_SIZE);
}

/*
 * Recover original instructions and breakpoints from relative jumps.
 * Caller must hold kprobe_mutex.
 */
extern void arch_unoptimize_kprobes(struct list_head *oplist,
				    struct list_head *done_list)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
{
	struct optimized_kprobe *op;

	if (p->flags & KPROBE_FLAG_OPTIMIZED) {
		/* This kprobe is really able to run optimized path. */
		op = container_of(p, struct optimized_kprobe, kp);
		/* Detour through copied instructions */
		regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
		if (!reenter)
			reset_current_kprobe();
		return 1;
	}
	return 0;
}
NOKPROBE_SYMBOL(setup_detour_execution);
