/*
 * arch/tile/kernel/kprobes.c
 * Kprobes on TILE-Gx
 *
 * Some portions copied from the MIPS version.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright 2006 Sony Corp.
 * Copyright 2010 Cavium Networks
 *
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>

#include <arch/opcode.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

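/*
 * breakpoint_insn is the bundle planted at a probed address when the
 * probe is armed.  breakpoint2_insn is a distinguishable variant placed
 * after the out-of-line copy of the probed bundle; the trap it raises
 * (DIE_SSTEPBP) signals that single-stepping has completed.
 */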
tile_bundle_bits breakpoint_insn = TILEGX_BPT_BUNDLE;
tile_bundle_bits breakpoint2_insn = TILEGX_BPT_BUNDLE | DIE_SSTEPBP;

/*
 * Check whether instruction is branch or jump, or if executing it
 * has different results depending on where it is executed (e.g. lnk).
 */
static int __kprobes insn_has_control(kprobe_opcode_t insn)
{
	if (get_Mode(insn) != 0) {   /* Y-format bundle */
		if (get_Opcode_Y1(insn) != RRR_1_OPCODE_Y1 ||
		    get_RRROpcodeExtension_Y1(insn) != UNARY_RRR_1_OPCODE_Y1)
			return 0;

		switch (get_UnaryOpcodeExtension_Y1(insn)) {
		case JALRP_UNARY_OPCODE_Y1:
		case JALR_UNARY_OPCODE_Y1:
		case JRP_UNARY_OPCODE_Y1:
		case JR_UNARY_OPCODE_Y1:
		case LNK_UNARY_OPCODE_Y1:
			return 1;
		default:
			return 0;
		}
	}

	switch (get_Opcode_X1(insn)) {
	case BRANCH_OPCODE_X1:	/* branch instructions */
	case JUMP_OPCODE_X1:	/* jump instructions: j and jal */
		return 1;

	case RRR_0_OPCODE_X1:   /* other jump instructions */
		if (get_RRROpcodeExtension_X1(insn) != UNARY_RRR_0_OPCODE_X1)
			return 0;
		switch (get_UnaryOpcodeExtension_X1(insn)) {
		case JALRP_UNARY_OPCODE_X1:
		case JALR_UNARY_OPCODE_X1:
		case JRP_UNARY_OPCODE_X1:
		case JR_UNARY_OPCODE_X1:
		case LNK_UNARY_OPCODE_X1:
			return 1;
		default:
			return 0;
		}
	default:
		return 0;
	}
}

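/*
 * Validate and prepare a probe: the address must be bundle-aligned and
 * must not hold a control-flow instruction, since the bundle will be
 * single-stepped from an out-of-line copy at a different PC.
 */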
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long addr = (unsigned long)p->addr;

	if (addr & (sizeof(kprobe_opcode_t) - 1))
		return -EINVAL;

	if (insn_has_control(*p->addr)) {
		pr_notice("Kprobes for control instructions are not supported\n");
		return -EINVAL;
	}

	/* insn: must be on special executable page on tile. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	/*
	 * In the kprobe->ainsn.insn[] array we store the original
	 * instruction at index zero and a break trap instruction at
	 * index one.
	 */
	memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));
	p->ainsn.insn[1] = breakpoint2_insn;
	p->opcode = *p->addr;

	return 0;
}

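/*
 * Arm the probe by planting the breakpoint bundle over the probed
 * instruction.  Kernel text is mapped read-only on tile, so the store
 * goes through the writable alias of the text mapping.
 */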
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	unsigned long addr_wr;

	/* Operate on writable kernel text mapping. */
	addr_wr = (unsigned long)p->addr - MEM_SV_START + PAGE_OFFSET;

	if (probe_kernel_write((void *)addr_wr, &breakpoint_insn,
		sizeof(breakpoint_insn)))
		pr_err("%s: failed to enable kprobe\n", __func__);

	smp_wmb();
	flush_insn_slot(p);
}

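/*
 * Disarm the probe by writing the original bundle back over the
 * breakpoint, again via the writable alias of the text mapping.
 */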
void __kprobes arch_disarm_kprobe(struct kprobe *kp)
{
	unsigned long addr_wr;

	/* Operate on writable kernel text mapping. */
	addr_wr = (unsigned long)kp->addr - MEM_SV_START + PAGE_OFFSET;

	if (probe_kernel_write((void *)addr_wr, &kp->opcode,
		sizeof(kp->opcode)))
		pr_err("%s: failed to disable kprobe\n", __func__);

	smp_wmb();
	flush_insn_slot(kp);
}

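/* Release the instruction slot allocated by arch_prepare_kprobe(). */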
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}
}

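/*
 * save_previous_kprobe()/restore_previous_kprobe() stack the per-cpu
 * kprobe state, so that a probe hit from within another probe's
 * handler can be single-stepped and then unwound.
 */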
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.saved_pc = kcb->kprobe_saved_pc;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_saved_pc = kcb->prev_kprobe.saved_pc;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
			struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_pc = regs->pc;
}

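/*
 * Point the PC at the instruction to single-step: the out-of-line copy
 * in ainsn.insn[], whose trailing breakpoint2_insn traps once the copy
 * has executed.
 */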
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	/* Single step inline if the instruction is a break. */
	if (p->opcode == breakpoint_insn ||
	    p->opcode == breakpoint2_insn)
		regs->pc = (unsigned long)p->addr;
	else
		regs->pc = (unsigned long)&p->ainsn.insn[0];
}

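/*
 * Entry point for breakpoint traps, dispatched from
 * kprobe_exceptions_notify() on DIE_BREAK.  Returns nonzero if the
 * trap was consumed and needs no further handling.
 */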
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr;
	struct kprobe_ctlblk *kcb;

	addr = (kprobe_opcode_t *)regs->pc;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing. */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
			    p->ainsn.insn[0] == breakpoint_insn) {
				goto no_kprobe;
			}
			/*
			 * We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * Here we save the original kprobe variables and
			 * just single-step the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			if (*addr != breakpoint_insn) {
				/*
				 * The breakpoint instruction was removed by
				 * another cpu right after we hit it; no
				 * further handling of this interrupt is
				 * appropriate.
				 */
				ret = 1;
				goto no_kprobe;
			}
			p = __this_cpu_read(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != breakpoint_insn) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it. */
		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs)) {
		/* Handler has already set things up, so skip ss setup. */
		return 1;
	}

ss_probe:
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction that has been replaced by the breakpoint. To avoid the
 * SMP problems that can occur when we temporarily put back the
 * original opcode to single-step, we single-stepped a copy of the
 * instruction.  The address of this copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * breakpoint trap.
 */
static void __kprobes resume_execution(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb)
{
	unsigned long orig_pc = kcb->kprobe_saved_pc;
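	/* TILE-Gx bundles are 8 bytes: resume at the bundle after the probe. */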
	regs->pc = orig_pc + 8;
}

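/*
 * Called on the DIE_SSTEPBP trap raised by breakpoint2_insn after the
 * out-of-line copy has run: invoke the post handler, fix up the PC,
 * and pop any reentrant probe state.
 */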
static inline int post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs, kcb);

	/* Restore the original saved kprobe variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	return 1;
}

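/*
 * A fault occurred while a probe was active.  Give the registered
 * fault_handler first claim; if the fault happened during single-step,
 * back out of the probe before letting the fault be handled normally.
 */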
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
		return 1;

	if (kcb->kprobe_status & KPROBE_HIT_SS) {
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault.  We reset the current
		 * kprobe so that the pc points back to the probe
		 * address, and allow the page fault handler to
		 * continue as a normal page fault.
		 */
		resume_execution(cur, regs, kcb);
		reset_current_kprobe();
		preempt_enable_no_resched();
	}
	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_BREAK:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEPBP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_PAGE_FAULT:
		/* kprobe_running() needs smp_processor_id(). */
		preempt_disable();

		if (kprobe_running()
		    && kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}

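/*
 * Jprobes: save the registers and the top of the stack, then divert
 * the PC to the jprobe entry point.  longjmp_break_handler() restores
 * both when the entry function calls jprobe_return().
 */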
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_sp = regs->sp;

	memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp,
	       MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));

	regs->pc = (unsigned long)(jp->entry);

	return 1;
}

/* Defined in the inline asm below. */
void jprobe_return_end(void);

void __kprobes jprobe_return(void)
{
	asm volatile(
		"bpt\n\t"
		".globl jprobe_return_end\n"
		"jprobe_return_end:\n");
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (regs->pc >= (unsigned long)jprobe_return &&
	    regs->pc <= (unsigned long)jprobe_return_end) {
		*regs = kcb->jprobe_saved_regs;
		memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack,
		       MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
		preempt_enable_no_resched();

		return 1;
	}
	return 0;
}

/*
 * Function return probe trampoline:
 * - arch_init_kprobes() establishes a probepoint here
 * - When the probed function returns, this probe causes the
 *   handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(
		"nop\n\t"
		".global kretprobe_trampoline\n"
		"kretprobe_trampoline:\n\t"
		"nop\n\t"
		: : : "memory");
}

void kretprobe_trampoline(void);

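/*
 * Hijack the return path: stash the real return address and point the
 * link register at kretprobe_trampoline, so the probed function
 * "returns" into the trampoline.
 */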
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->lr;

	/* Replace the return addr with trampoline addr. */
	regs->lr = (unsigned long)kretprobe_trampoline;
}

/*
 * Called when the probe at kretprobe trampoline is hit.
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
						struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address) {
			/*
			 * This is the real return address.  Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack.
			 */
			break;
		}
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	instruction_pointer(regs) = orig_ret_address;

	reset_current_kprobe();
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption).
	 */
	return 1;
}

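/*
 * Report whether a probe sits on the kretprobe trampoline, so the
 * generic kprobes code can treat it specially.
 */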
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
		return 1;

	return 0;
}

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *)kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

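/* Called from the generic kprobes init to register the trampoline probe. */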
int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}