1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * arch/arm/kernel/kprobes.c
4 *
5 * Kprobes on ARM
6 *
7 * Abhishek Sagar <sagar.abhishek@gmail.com>
8 * Copyright (C) 2006, 2007 Motorola Inc.
9 *
10 * Nicolas Pitre <nico@marvell.com>
11 * Copyright (C) 2007 Marvell Ltd.
12 */
13
14 #include <linux/kernel.h>
15 #include <linux/kprobes.h>
16 #include <linux/module.h>
17 #include <linux/slab.h>
18 #include <linux/stop_machine.h>
19 #include <linux/sched/debug.h>
20 #include <linux/stringify.h>
21 #include <asm/traps.h>
22 #include <asm/opcodes.h>
23 #include <asm/cacheflush.h>
24 #include <linux/percpu.h>
25 #include <linux/bug.h>
26 #include <asm/patch.h>
27 #include <asm/sections.h>
28
29 #include "../decode-arm.h"
30 #include "../decode-thumb.h"
31 #include "core.h"
32
/*
 * Amount of stack to preserve around a probed instruction: capped at
 * MAX_STACK_SIZE, or however much stack actually lies between 'addr'
 * and the top of the current thread's stack, whichever is smaller.
 */
#define MIN_STACK_SIZE(addr) 				\
	min((unsigned long)MAX_STACK_SIZE,		\
	    (unsigned long)current_thread_info() + THREAD_START_SP - (addr))

/* Synchronise the instruction cache after writing into an insn slot. */
#define flush_insns(addr, size)				\
	flush_icache_range((unsigned long)(addr),	\
			   (unsigned long)(addr) +	\
			   (size))

/* Per-CPU pointer to the kprobe being handled (NULL when none). */
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
/* Per-CPU control block holding probe status across re-entry. */
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
44
45
arch_prepare_kprobe(struct kprobe * p)46 int __kprobes arch_prepare_kprobe(struct kprobe *p)
47 {
48 kprobe_opcode_t insn;
49 kprobe_opcode_t tmp_insn[MAX_INSN_SIZE];
50 unsigned long addr = (unsigned long)p->addr;
51 bool thumb;
52 kprobe_decode_insn_t *decode_insn;
53 const union decode_action *actions;
54 int is;
55 const struct decode_checker **checkers;
56
57 #ifdef CONFIG_THUMB2_KERNEL
58 thumb = true;
59 addr &= ~1; /* Bit 0 would normally be set to indicate Thumb code */
60 insn = __mem_to_opcode_thumb16(((u16 *)addr)[0]);
61 if (is_wide_instruction(insn)) {
62 u16 inst2 = __mem_to_opcode_thumb16(((u16 *)addr)[1]);
63 insn = __opcode_thumb32_compose(insn, inst2);
64 decode_insn = thumb32_probes_decode_insn;
65 actions = kprobes_t32_actions;
66 checkers = kprobes_t32_checkers;
67 } else {
68 decode_insn = thumb16_probes_decode_insn;
69 actions = kprobes_t16_actions;
70 checkers = kprobes_t16_checkers;
71 }
72 #else /* !CONFIG_THUMB2_KERNEL */
73 thumb = false;
74 if (addr & 0x3)
75 return -EINVAL;
76 insn = __mem_to_opcode_arm(*p->addr);
77 decode_insn = arm_probes_decode_insn;
78 actions = kprobes_arm_actions;
79 checkers = kprobes_arm_checkers;
80 #endif
81
82 p->opcode = insn;
83 p->ainsn.insn = tmp_insn;
84
85 switch ((*decode_insn)(insn, &p->ainsn, true, actions, checkers)) {
86 case INSN_REJECTED: /* not supported */
87 return -EINVAL;
88
89 case INSN_GOOD: /* instruction uses slot */
90 p->ainsn.insn = get_insn_slot();
91 if (!p->ainsn.insn)
92 return -ENOMEM;
93 for (is = 0; is < MAX_INSN_SIZE; ++is)
94 p->ainsn.insn[is] = tmp_insn[is];
95 flush_insns(p->ainsn.insn,
96 sizeof(p->ainsn.insn[0]) * MAX_INSN_SIZE);
97 p->ainsn.insn_fn = (probes_insn_fn_t *)
98 ((uintptr_t)p->ainsn.insn | thumb);
99 break;
100
101 case INSN_GOOD_NO_SLOT: /* instruction doesn't need insn slot */
102 p->ainsn.insn = NULL;
103 break;
104 }
105
106 /*
107 * Never instrument insn like 'str r0, [sp, +/-r1]'. Also, insn likes
108 * 'str r0, [sp, #-68]' should also be prohibited.
109 * See __und_svc.
110 */
111 if ((p->ainsn.stack_space < 0) ||
112 (p->ainsn.stack_space > MAX_STACK_SIZE))
113 return -EINVAL;
114
115 return 0;
116 }
117
arch_arm_kprobe(struct kprobe * p)118 void __kprobes arch_arm_kprobe(struct kprobe *p)
119 {
120 unsigned int brkp;
121 void *addr;
122
123 if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
124 /* Remove any Thumb flag */
125 addr = (void *)((uintptr_t)p->addr & ~1);
126
127 if (is_wide_instruction(p->opcode))
128 brkp = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION;
129 else
130 brkp = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION;
131 } else {
132 kprobe_opcode_t insn = p->opcode;
133
134 addr = p->addr;
135 brkp = KPROBE_ARM_BREAKPOINT_INSTRUCTION;
136
137 if (insn >= 0xe0000000)
138 brkp |= 0xe0000000; /* Unconditional instruction */
139 else
140 brkp |= insn & 0xf0000000; /* Copy condition from insn */
141 }
142
143 patch_text(addr, brkp);
144 }
145
/*
 * The actual disarming is done here on each CPU and synchronized using
 * stop_machine. This synchronization is necessary on SMP to avoid removing
 * a probe between the moment the 'Undefined Instruction' exception is raised
 * and the moment the exception handler reads the faulting instruction from
 * memory. It is also needed to atomically set the two half-words of a 32-bit
 * Thumb breakpoint.
 */
struct patch {
	void *addr;		/* location to patch */
	unsigned int insn;	/* instruction to write there */
};
158
__kprobes_remove_breakpoint(void * data)159 static int __kprobes_remove_breakpoint(void *data)
160 {
161 struct patch *p = data;
162 __patch_text(p->addr, p->insn);
163 return 0;
164 }
165
kprobes_remove_breakpoint(void * addr,unsigned int insn)166 void __kprobes kprobes_remove_breakpoint(void *addr, unsigned int insn)
167 {
168 struct patch p = {
169 .addr = addr,
170 .insn = insn,
171 };
172 stop_machine_cpuslocked(__kprobes_remove_breakpoint, &p,
173 cpu_online_mask);
174 }
175
arch_disarm_kprobe(struct kprobe * p)176 void __kprobes arch_disarm_kprobe(struct kprobe *p)
177 {
178 kprobes_remove_breakpoint((void *)((uintptr_t)p->addr & ~1),
179 p->opcode);
180 }
181
arch_remove_kprobe(struct kprobe * p)182 void __kprobes arch_remove_kprobe(struct kprobe *p)
183 {
184 if (p->ainsn.insn) {
185 free_insn_slot(p->ainsn.insn, 0);
186 p->ainsn.insn = NULL;
187 }
188 }
189
save_previous_kprobe(struct kprobe_ctlblk * kcb)190 static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
191 {
192 kcb->prev_kprobe.kp = kprobe_running();
193 kcb->prev_kprobe.status = kcb->kprobe_status;
194 }
195
restore_previous_kprobe(struct kprobe_ctlblk * kcb)196 static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
197 {
198 __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
199 kcb->kprobe_status = kcb->prev_kprobe.status;
200 }
201
/* Mark 'p' as the kprobe currently being handled on this CPU. */
static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}
206
207 static void __kprobes
singlestep_skip(struct kprobe * p,struct pt_regs * regs)208 singlestep_skip(struct kprobe *p, struct pt_regs *regs)
209 {
210 #ifdef CONFIG_THUMB2_KERNEL
211 regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
212 if (is_wide_instruction(p->opcode))
213 regs->ARM_pc += 4;
214 else
215 regs->ARM_pc += 2;
216 #else
217 regs->ARM_pc += 4;
218 #endif
219 }
220
/*
 * Execute the displaced instruction out of line via its decoded
 * single-step handler, updating 'regs' as if it had run in place.
 */
static inline void __kprobes
singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	p->ainsn.insn_singlestep(p->opcode, &p->ainsn, regs);
}
226
/*
 * Called with IRQs disabled. IRQs must remain disabled from that point
 * all the way until processing this kprobe is complete. The current
 * kprobes implementation cannot process more than one nested level of
 * kprobe, and that level is reserved for user kprobe handlers, so we can't
 * risk encountering a new kprobe in an interrupt handler.
 */
static void __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p, *cur;
	struct kprobe_ctlblk *kcb;

	kcb = get_kprobe_ctlblk();
	cur = kprobe_running();

#ifdef CONFIG_THUMB2_KERNEL
	/*
	 * First look for a probe which was registered using an address with
	 * bit 0 set, this is the usual situation for pointers to Thumb code.
	 * If not found, fallback to looking for one with bit 0 clear.
	 */
	p = get_kprobe((kprobe_opcode_t *)(regs->ARM_pc | 1));
	if (!p)
		p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc);

#else /* ! CONFIG_THUMB2_KERNEL */
	p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc);
#endif

	if (p) {
		if (!p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
			/*
			 * Probe hit but conditional execution check failed,
			 * so just skip the instruction and continue as if
			 * nothing had happened.
			 * In this case, we can skip recursing check too.
			 */
			singlestep_skip(p, regs);
		} else if (cur) {
			/* Kprobe is pending, so we're recursing. */
			switch (kcb->kprobe_status) {
			case KPROBE_HIT_ACTIVE:
			case KPROBE_HIT_SSDONE:
			case KPROBE_HIT_SS:
				/* A pre- or post-handler probe got us here. */
				kprobes_inc_nmissed_count(p);
				/* Save outer probe state, run this one, then restore. */
				save_previous_kprobe(kcb);
				set_current_kprobe(p);
				kcb->kprobe_status = KPROBE_REENTER;
				singlestep(p, regs, kcb);
				restore_previous_kprobe(kcb);
				break;
			case KPROBE_REENTER:
				/* A nested probe was hit in FIQ, it is a BUG */
				pr_warn("Unrecoverable kprobe detected.\n");
				dump_kprobe(p);
				fallthrough;
			default:
				/* impossible cases */
				BUG();
			}
		} else {
			/* Probe hit and conditional execution check ok. */
			set_current_kprobe(p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing. If we have a
			 * pre-handler and it returned non-zero, it will
			 * modify the execution path and no need to single
			 * stepping. Let's just reset current kprobe and exit.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs)) {
				kcb->kprobe_status = KPROBE_HIT_SS;
				singlestep(p, regs, kcb);
				if (p->post_handler) {
					kcb->kprobe_status = KPROBE_HIT_SSDONE;
					p->post_handler(p, regs, 0);
				}
			}
			reset_current_kprobe();
		}
	} else {
		/*
		 * The probe was removed and a race is in progress.
		 * There is nothing we can do about it. Let's restart
		 * the instruction. By the time we can restart, the
		 * real instruction will be there.
		 */
	}
}
319
kprobe_trap_handler(struct pt_regs * regs,unsigned int instr)320 static int __kprobes kprobe_trap_handler(struct pt_regs *regs, unsigned int instr)
321 {
322 unsigned long flags;
323 local_irq_save(flags);
324 kprobe_handler(regs);
325 local_irq_restore(flags);
326 return 0;
327 }
328
/*
 * Recover from a page fault taken while a kprobe is being processed.
 * Returns 1 if a user fault handler dealt with the fault, 0 to let the
 * normal page fault path continue.
 */
int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the PC to point back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->ARM_pc = (long)cur->addr;
		if (kcb->kprobe_status == KPROBE_REENTER) {
			/* Nested probe: pop back to the outer probe's state. */
			restore_previous_kprobe(kcb);
		} else {
			reset_current_kprobe();
		}
		break;

	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting,
		 * we can also use npre/npostfault count for accounting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page_fault, this could happen
		 * if handler tries to access user space by
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
			return 1;
		break;

	default:
		break;
	}

	return 0;
}
378
/*
 * Generic kprobes die-notifier callback, required by the kprobes core.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	/*
	 * notify_die() is currently never called on ARM,
	 * so this callback is currently empty.
	 */
	return NOTIFY_DONE;
}
388
/*
 * When a retprobed function returns, trampoline_handler() is called,
 * calling the kretprobe's handler. We construct a struct pt_regs to
 * give a view of registers r0-r11 to the user return-handler. This is
 * not a complete pt_regs structure, but that should be plenty sufficient
 * for kretprobe handlers which should normally be interested in r0 only
 * anyway.
 */
void __naked __kprobes kretprobe_trampoline(void)
{
	__asm__ __volatile__ (
		/* Build the partial pt_regs view (r0-r11) on the stack. */
		"stmdb	sp!, {r0 - r11}		\n\t"
		"mov	r0, sp			\n\t"
		"bl	trampoline_handler	\n\t"
		/* trampoline_handler returned the real return address. */
		"mov	lr, r0			\n\t"
		"ldmia	sp!, {r0 - r11}		\n\t"
#ifdef CONFIG_THUMB2_KERNEL
		"bx	lr			\n\t"
#else
		"mov	pc, lr			\n\t"
#endif
		: : : "memory");
}
412
413 /* Called from kretprobe_trampoline */
trampoline_handler(struct pt_regs * regs)414 static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
415 {
416 return (void *)kretprobe_trampoline_handler(regs, &kretprobe_trampoline,
417 (void *)regs->ARM_fp);
418 }
419
arch_prepare_kretprobe(struct kretprobe_instance * ri,struct pt_regs * regs)420 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
421 struct pt_regs *regs)
422 {
423 ri->ret_addr = (kprobe_opcode_t *)regs->ARM_lr;
424 ri->fp = (void *)regs->ARM_fp;
425
426 /* Replace the return addr with trampoline addr. */
427 regs->ARM_lr = (unsigned long)&kretprobe_trampoline;
428 }
429
/*
 * Arch hook asking whether 'p' sits on the kretprobe trampoline;
 * ARM always answers no (0) here.
 */
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}
434
#ifdef CONFIG_THUMB2_KERNEL

/* Undef hook matching the 16-bit Thumb kprobe breakpoint in SVC mode. */
static struct undef_hook kprobes_thumb16_break_hook = {
	.instr_mask = 0xffff,
	.instr_val = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION,
	.cpsr_mask = MODE_MASK,
	.cpsr_val = SVC_MODE,
	.fn = kprobe_trap_handler,
};

/* Undef hook matching the 32-bit Thumb kprobe breakpoint in SVC mode. */
static struct undef_hook kprobes_thumb32_break_hook = {
	.instr_mask = 0xffffffff,
	.instr_val = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION,
	.cpsr_mask = MODE_MASK,
	.cpsr_val = SVC_MODE,
	.fn = kprobe_trap_handler,
};

#else  /* !CONFIG_THUMB2_KERNEL */

/*
 * Undef hook for the ARM kprobe breakpoint. The condition field
 * (bits 31:28) is masked out because arch_arm_kprobe() copies the
 * displaced instruction's condition into the breakpoint encoding.
 */
static struct undef_hook kprobes_arm_break_hook = {
	.instr_mask = 0x0fffffff,
	.instr_val = KPROBE_ARM_BREAKPOINT_INSTRUCTION,
	.cpsr_mask = MODE_MASK,
	.cpsr_val = SVC_MODE,
	.fn = kprobe_trap_handler,
};

#endif /* !CONFIG_THUMB2_KERNEL */
464
/*
 * Boot-time init: set up the instruction decoder and register the
 * undefined-instruction hooks that catch our breakpoint encodings.
 */
int __init arch_init_kprobes(void)
{
	arm_probes_decode_init();
#ifdef CONFIG_THUMB2_KERNEL
	register_undef_hook(&kprobes_thumb16_break_hook);
	register_undef_hook(&kprobes_thumb32_break_hook);
#else
	register_undef_hook(&kprobes_arm_break_hook);
#endif
	return 0;
}
476
arch_within_kprobe_blacklist(unsigned long addr)477 bool arch_within_kprobe_blacklist(unsigned long addr)
478 {
479 void *a = (void *)addr;
480
481 return __in_irqentry_text(addr) ||
482 in_entry_text(addr) ||
483 in_idmap_text(addr) ||
484 memory_contains(__kprobes_text_start, __kprobes_text_end, a, 1);
485 }
486