1 /*
2 * Kernel Probes (KProbes)
3 * kernel/kprobes.c
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 *
19 * Copyright (C) IBM Corporation, 2002, 2004
20 *
21 * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
22 * Probes initial implementation (includes suggestions from
23 * Rusty Russell).
24 * 2004-Aug Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
25 * hlists and exceptions notifier as suggested by Andi Kleen.
26 * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
27 * interface to access function arguments.
28 * 2004-Sep Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
29 * exceptions notifier to be first on the priority list.
30 * 2005-May Hien Nguyen <hien@us.ibm.com>, Jim Keniston
31 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
32 * <prasanna@in.ibm.com> added function-return probes.
33 */
34 #include <linux/kprobes.h>
35 #include <linux/hash.h>
36 #include <linux/init.h>
37 #include <linux/slab.h>
38 #include <linux/stddef.h>
39 #include <linux/export.h>
40 #include <linux/moduleloader.h>
41 #include <linux/kallsyms.h>
42 #include <linux/freezer.h>
43 #include <linux/seq_file.h>
44 #include <linux/debugfs.h>
45 #include <linux/sysctl.h>
46 #include <linux/kdebug.h>
47 #include <linux/memory.h>
48 #include <linux/ftrace.h>
49 #include <linux/cpu.h>
50 #include <linux/jump_label.h>
51
52 #include <asm-generic/sections.h>
53 #include <asm/cacheflush.h>
54 #include <asm/errno.h>
55 #include <asm/uaccess.h>
56
57 #define KPROBE_HASH_BITS 6
58 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
59
60
61 /*
62  * Some oddball architectures like 64-bit powerpc have function descriptors,
63 * so this must be overridable.
64 */
65 #ifndef kprobe_lookup_name
66 #define kprobe_lookup_name(name, addr) \
67 addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
68 #endif
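
/*
 * Illustrative note (added, not from the original source): with the generic
 * definition above, kprobe_lookup_name("vfs_read", addr) simply resolves the
 * symbol through kallsyms_lookup_name(). Architectures with function
 * descriptors (e.g. 64-bit powerpc) are expected to override the macro so the
 * returned address points at the actual code, not the descriptor.
 */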
69
70 static int kprobes_initialized;
71 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
72 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
73
74 /* NOTE: change this value only with kprobe_mutex held */
75 static bool kprobes_all_disarmed;
76
77 /* This protects kprobe_table and optimizing_list */
78 static DEFINE_MUTEX(kprobe_mutex);
79 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
80 static struct {
81 raw_spinlock_t lock ____cacheline_aligned_in_smp;
82 } kretprobe_table_locks[KPROBE_TABLE_SIZE];
83
84 static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
85 {
86 return &(kretprobe_table_locks[hash].lock);
87 }
88
89 /*
90  * Normally, functions that we'd want to prohibit kprobes in are marked
91  * __kprobes. But there are cases where such functions already belong to
92  * a different section (__sched for preempt_schedule).
93  *
94  * For such cases, we now have a blacklist.
95 */
96 static struct kprobe_blackpoint kprobe_blacklist[] = {
97 {"preempt_schedule",},
98 {"native_get_debugreg",},
99 {"irq_entries_start",},
100 {"common_interrupt",},
101 {"mcount",}, /* mcount can be called from everywhere */
102 {NULL} /* Terminator */
103 };
104
105 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
106 /*
107 * kprobe->ainsn.insn points to the copy of the instruction to be
108  * single-stepped. x86_64, POWER4 and above have no-exec support, and
109  * stepping on an instruction on a vmalloced/kmalloced/data page
110 * is a recipe for disaster
111 */
112 struct kprobe_insn_page {
113 struct list_head list;
114 kprobe_opcode_t *insns; /* Page of instruction slots */
115 int nused;
116 int ngarbage;
117 char slot_used[];
118 };
119
120 #define KPROBE_INSN_PAGE_SIZE(slots) \
121 (offsetof(struct kprobe_insn_page, slot_used) + \
122 (sizeof(char) * (slots)))
123
124 struct kprobe_insn_cache {
125 struct list_head pages; /* list of kprobe_insn_page */
126 size_t insn_size; /* size of instruction slot */
127 int nr_garbage;
128 };
129
130 static int slots_per_page(struct kprobe_insn_cache *c)
131 {
132 return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
133 }
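
/*
 * Rough worked example (an assumption for illustration, not part of the
 * original source): on x86, where kprobe_opcode_t is one byte and
 * MAX_INSN_SIZE is 16, slots_per_page() gives 4096 / (16 * 1) = 256
 * single-step slots per kprobe_insn_page.
 */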
134
135 enum kprobe_slot_state {
136 SLOT_CLEAN = 0,
137 SLOT_DIRTY = 1,
138 SLOT_USED = 2,
139 };
140
141 static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_slots */
142 static struct kprobe_insn_cache kprobe_insn_slots = {
143 .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
144 .insn_size = MAX_INSN_SIZE,
145 .nr_garbage = 0,
146 };
147 static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c);
148
149 /**
150 * __get_insn_slot() - Find a slot on an executable page for an instruction.
151 * We allocate an executable page if there's no room on existing ones.
152 */
153 static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
154 {
155 struct kprobe_insn_page *kip;
156
157 retry:
158 list_for_each_entry(kip, &c->pages, list) {
159 if (kip->nused < slots_per_page(c)) {
160 int i;
161 for (i = 0; i < slots_per_page(c); i++) {
162 if (kip->slot_used[i] == SLOT_CLEAN) {
163 kip->slot_used[i] = SLOT_USED;
164 kip->nused++;
165 return kip->insns + (i * c->insn_size);
166 }
167 }
168 /* kip->nused is broken. Fix it. */
169 kip->nused = slots_per_page(c);
170 WARN_ON(1);
171 }
172 }
173
174 	/* If there are any garbage slots, collect them and try again. */
175 if (c->nr_garbage && collect_garbage_slots(c) == 0)
176 goto retry;
177
178 /* All out of space. Need to allocate a new page. */
179 kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
180 if (!kip)
181 return NULL;
182
183 /*
184 * Use module_alloc so this page is within +/- 2GB of where the
185 * kernel image and loaded module images reside. This is required
186 * so x86_64 can correctly handle the %rip-relative fixups.
187 */
188 kip->insns = module_alloc(PAGE_SIZE);
189 if (!kip->insns) {
190 kfree(kip);
191 return NULL;
192 }
193 INIT_LIST_HEAD(&kip->list);
194 memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
195 kip->slot_used[0] = SLOT_USED;
196 kip->nused = 1;
197 kip->ngarbage = 0;
198 list_add(&kip->list, &c->pages);
199 return kip->insns;
200 }
201
202
203 kprobe_opcode_t __kprobes *get_insn_slot(void)
204 {
205 kprobe_opcode_t *ret = NULL;
206
207 mutex_lock(&kprobe_insn_mutex);
208 ret = __get_insn_slot(&kprobe_insn_slots);
209 mutex_unlock(&kprobe_insn_mutex);
210
211 return ret;
212 }
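
/*
 * Usage sketch (illustrative, based on how arch code typically drives this
 * cache): an architecture's arch_prepare_kprobe() calls get_insn_slot() to
 * obtain p->ainsn.insn, copies the original instruction there for
 * single-stepping, and later releases the slot with free_insn_slot() from
 * arch_remove_kprobe().
 */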
213
214 /* Return 1 if all garbage slots are collected, otherwise 0. */
215 static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
216 {
217 kip->slot_used[idx] = SLOT_CLEAN;
218 kip->nused--;
219 if (kip->nused == 0) {
220 /*
221 * Page is no longer in use. Free it unless
222 * it's the last one. We keep the last one
223 * so as not to have to set it up again the
224 * next time somebody inserts a probe.
225 */
226 if (!list_is_singular(&kip->list)) {
227 list_del(&kip->list);
228 module_free(NULL, kip->insns);
229 kfree(kip);
230 }
231 return 1;
232 }
233 return 0;
234 }
235
236 static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
237 {
238 struct kprobe_insn_page *kip, *next;
239
240 	/* Ensure no one is still running on the garbage slots */
241 synchronize_sched();
242
243 list_for_each_entry_safe(kip, next, &c->pages, list) {
244 int i;
245 if (kip->ngarbage == 0)
246 continue;
247 		kip->ngarbage = 0;	/* we will collect all garbage slots */
248 for (i = 0; i < slots_per_page(c); i++) {
249 if (kip->slot_used[i] == SLOT_DIRTY &&
250 collect_one_slot(kip, i))
251 break;
252 }
253 }
254 c->nr_garbage = 0;
255 return 0;
256 }
257
258 static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
259 kprobe_opcode_t *slot, int dirty)
260 {
261 struct kprobe_insn_page *kip;
262
263 list_for_each_entry(kip, &c->pages, list) {
264 long idx = ((long)slot - (long)kip->insns) /
265 (c->insn_size * sizeof(kprobe_opcode_t));
266 if (idx >= 0 && idx < slots_per_page(c)) {
267 WARN_ON(kip->slot_used[idx] != SLOT_USED);
268 if (dirty) {
269 kip->slot_used[idx] = SLOT_DIRTY;
270 kip->ngarbage++;
271 if (++c->nr_garbage > slots_per_page(c))
272 collect_garbage_slots(c);
273 } else
274 collect_one_slot(kip, idx);
275 return;
276 }
277 }
278 /* Could not free this slot. */
279 WARN_ON(1);
280 }
281
282 void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
283 {
284 mutex_lock(&kprobe_insn_mutex);
285 __free_insn_slot(&kprobe_insn_slots, slot, dirty);
286 mutex_unlock(&kprobe_insn_mutex);
287 }
288 #ifdef CONFIG_OPTPROBES
289 /* For optimized_kprobe buffer */
290 static DEFINE_MUTEX(kprobe_optinsn_mutex); /* Protects kprobe_optinsn_slots */
291 static struct kprobe_insn_cache kprobe_optinsn_slots = {
292 .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
293 /* .insn_size is initialized later */
294 .nr_garbage = 0,
295 };
296 /* Get a slot for optimized_kprobe buffer */
297 kprobe_opcode_t __kprobes *get_optinsn_slot(void)
298 {
299 kprobe_opcode_t *ret = NULL;
300
301 mutex_lock(&kprobe_optinsn_mutex);
302 ret = __get_insn_slot(&kprobe_optinsn_slots);
303 mutex_unlock(&kprobe_optinsn_mutex);
304
305 return ret;
306 }
307
308 void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty)
309 {
310 mutex_lock(&kprobe_optinsn_mutex);
311 __free_insn_slot(&kprobe_optinsn_slots, slot, dirty);
312 mutex_unlock(&kprobe_optinsn_mutex);
313 }
314 #endif
315 #endif
316
317 /* We have preemption disabled, so it is safe to use the __ versions */
318 static inline void set_kprobe_instance(struct kprobe *kp)
319 {
320 __this_cpu_write(kprobe_instance, kp);
321 }
322
323 static inline void reset_kprobe_instance(void)
324 {
325 __this_cpu_write(kprobe_instance, NULL);
326 }
327
328 /*
329 * This routine is called either:
330 * - under the kprobe_mutex - during kprobe_[un]register()
331 * OR
332 * - with preemption disabled - from arch/xxx/kernel/kprobes.c
333 */
334 struct kprobe __kprobes *get_kprobe(void *addr)
335 {
336 struct hlist_head *head;
337 struct kprobe *p;
338
339 head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
340 hlist_for_each_entry_rcu(p, head, hlist) {
341 if (p->addr == addr)
342 return p;
343 }
344
345 return NULL;
346 }
347
348 static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
349
350 /* Return true if the kprobe is an aggregator */
351 static inline int kprobe_aggrprobe(struct kprobe *p)
352 {
353 return p->pre_handler == aggr_pre_handler;
354 }
355
356 /* Return true(!0) if the kprobe is unused */
357 static inline int kprobe_unused(struct kprobe *p)
358 {
359 return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
360 list_empty(&p->list);
361 }
362
363 /*
364 * Keep all fields in the kprobe consistent
365 */
366 static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
367 {
368 memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
369 memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
370 }
371
372 #ifdef CONFIG_OPTPROBES
373 /* NOTE: change this value only with kprobe_mutex held */
374 static bool kprobes_allow_optimization;
375
376 /*
377  * Call all pre_handlers on the list, but ignore their return values.
378  * This must be called from the arch-dependent optimized caller.
379 */
380 void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
381 {
382 struct kprobe *kp;
383
384 list_for_each_entry_rcu(kp, &p->list, list) {
385 if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
386 set_kprobe_instance(kp);
387 kp->pre_handler(kp, regs);
388 }
389 reset_kprobe_instance();
390 }
391 }
392
393 /* Free optimized instructions and optimized_kprobe */
394 static __kprobes void free_aggr_kprobe(struct kprobe *p)
395 {
396 struct optimized_kprobe *op;
397
398 op = container_of(p, struct optimized_kprobe, kp);
399 arch_remove_optimized_kprobe(op);
400 arch_remove_kprobe(p);
401 kfree(op);
402 }
403
404 /* Return true(!0) if the kprobe is ready for optimization. */
405 static inline int kprobe_optready(struct kprobe *p)
406 {
407 struct optimized_kprobe *op;
408
409 if (kprobe_aggrprobe(p)) {
410 op = container_of(p, struct optimized_kprobe, kp);
411 return arch_prepared_optinsn(&op->optinsn);
412 }
413
414 return 0;
415 }
416
417 /* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
418 static inline int kprobe_disarmed(struct kprobe *p)
419 {
420 struct optimized_kprobe *op;
421
422 /* If kprobe is not aggr/opt probe, just return kprobe is disabled */
423 if (!kprobe_aggrprobe(p))
424 return kprobe_disabled(p);
425
426 op = container_of(p, struct optimized_kprobe, kp);
427
428 return kprobe_disabled(p) && list_empty(&op->list);
429 }
430
431 /* Return true(!0) if the probe is queued on (un)optimizing lists */
432 static int __kprobes kprobe_queued(struct kprobe *p)
433 {
434 struct optimized_kprobe *op;
435
436 if (kprobe_aggrprobe(p)) {
437 op = container_of(p, struct optimized_kprobe, kp);
438 if (!list_empty(&op->list))
439 return 1;
440 }
441 return 0;
442 }
443
444 /*
445 * Return an optimized kprobe whose optimizing code replaces
446  * instructions including addr (excluding the breakpoint itself).
447 */
448 static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
449 {
450 int i;
451 struct kprobe *p = NULL;
452 struct optimized_kprobe *op;
453
454 /* Don't check i == 0, since that is a breakpoint case. */
455 for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
456 p = get_kprobe((void *)(addr - i));
457
458 if (p && kprobe_optready(p)) {
459 op = container_of(p, struct optimized_kprobe, kp);
460 if (arch_within_optimized_kprobe(op, addr))
461 return p;
462 }
463
464 return NULL;
465 }
466
467 /* Optimization staging list, protected by kprobe_mutex */
468 static LIST_HEAD(optimizing_list);
469 static LIST_HEAD(unoptimizing_list);
470 static LIST_HEAD(freeing_list);
471
472 static void kprobe_optimizer(struct work_struct *work);
473 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
474 #define OPTIMIZE_DELAY 5
475
476 /*
477 * Optimize (replace a breakpoint with a jump) kprobes listed on
478 * optimizing_list.
479 */
480 static __kprobes void do_optimize_kprobes(void)
481 {
482 	/* Optimization is never done while kprobes are all disarmed */
483 if (kprobes_all_disarmed || !kprobes_allow_optimization ||
484 list_empty(&optimizing_list))
485 return;
486
487 /*
488 	 * The optimization/unoptimization refers to online_cpus via
489 	 * stop_machine(), and cpu-hotplug modifies online_cpus. At the
490 	 * same time, text_mutex will be held both in cpu-hotplug and here.
491 	 * This combination can cause a deadlock (cpu-hotplug tries to lock
492 	 * text_mutex but stop_machine cannot be done because online_cpus
493 	 * has been changed).
494 	 * To avoid this deadlock, we need to call get_online_cpus()
495 	 * to prevent cpu-hotplug outside of the text_mutex locking.
496 */
497 get_online_cpus();
498 mutex_lock(&text_mutex);
499 arch_optimize_kprobes(&optimizing_list);
500 mutex_unlock(&text_mutex);
501 put_online_cpus();
502 }
503
504 /*
505 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
506  * if needed) kprobes listed on unoptimizing_list.
507 */
508 static __kprobes void do_unoptimize_kprobes(void)
509 {
510 struct optimized_kprobe *op, *tmp;
511
512 	/* Unoptimization must be done at any time, even while kprobes are disarmed */
513 if (list_empty(&unoptimizing_list))
514 return;
515
516 /* Ditto to do_optimize_kprobes */
517 get_online_cpus();
518 mutex_lock(&text_mutex);
519 arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
520 	/* Loop over the freeing_list for disarming */
521 list_for_each_entry_safe(op, tmp, &freeing_list, list) {
522 /* Disarm probes if marked disabled */
523 if (kprobe_disabled(&op->kp))
524 arch_disarm_kprobe(&op->kp);
525 if (kprobe_unused(&op->kp)) {
526 /*
527 * Remove unused probes from hash list. After waiting
528 * for synchronization, these probes are reclaimed.
529 * (reclaiming is done by do_free_cleaned_kprobes.)
530 */
531 hlist_del_rcu(&op->kp.hlist);
532 } else
533 list_del_init(&op->list);
534 }
535 mutex_unlock(&text_mutex);
536 put_online_cpus();
537 }
538
539 /* Reclaim all kprobes on the freeing_list */
540 static __kprobes void do_free_cleaned_kprobes(void)
541 {
542 struct optimized_kprobe *op, *tmp;
543
544 list_for_each_entry_safe(op, tmp, &freeing_list, list) {
545 BUG_ON(!kprobe_unused(&op->kp));
546 list_del_init(&op->list);
547 free_aggr_kprobe(&op->kp);
548 }
549 }
550
551 /* Start optimizer after OPTIMIZE_DELAY passed */
552 static __kprobes void kick_kprobe_optimizer(void)
553 {
554 schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
555 }
556
557 /* Kprobe jump optimizer */
558 static __kprobes void kprobe_optimizer(struct work_struct *work)
559 {
560 mutex_lock(&kprobe_mutex);
561 /* Lock modules while optimizing kprobes */
562 mutex_lock(&module_mutex);
563
564 /*
565 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
566 	 * kprobes before waiting for the quiescence period.
567 */
568 do_unoptimize_kprobes();
569
570 /*
571 	 * Step 2: Wait for the quiescence period to ensure all running interrupts
572 	 * are done. Because an optprobe may modify multiple instructions,
573 	 * there is a chance that the Nth instruction is interrupted. In that
574 	 * case, a running interrupt can return into the 2nd-Nth byte of the
575 	 * jump instruction. This wait is for avoiding that.
576 */
577 synchronize_sched();
578
579 	/* Step 3: Optimize kprobes after the quiescence period */
580 do_optimize_kprobes();
581
582 	/* Step 4: Free cleaned kprobes after the quiescence period */
583 do_free_cleaned_kprobes();
584
585 mutex_unlock(&module_mutex);
586 mutex_unlock(&kprobe_mutex);
587
588 /* Step 5: Kick optimizer again if needed */
589 if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
590 kick_kprobe_optimizer();
591 }
592
593 /* Wait for completing optimization and unoptimization */
594 static __kprobes void wait_for_kprobe_optimizer(void)
595 {
596 mutex_lock(&kprobe_mutex);
597
598 while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
599 mutex_unlock(&kprobe_mutex);
600
601 		/* this will also make optimizing_work execute immediately */
602 flush_delayed_work(&optimizing_work);
603 /* @optimizing_work might not have been queued yet, relax */
604 cpu_relax();
605
606 mutex_lock(&kprobe_mutex);
607 }
608
609 mutex_unlock(&kprobe_mutex);
610 }
611
612 /* Optimize kprobe if p is ready to be optimized */
613 static __kprobes void optimize_kprobe(struct kprobe *p)
614 {
615 struct optimized_kprobe *op;
616
617 /* Check if the kprobe is disabled or not ready for optimization. */
618 if (!kprobe_optready(p) || !kprobes_allow_optimization ||
619 (kprobe_disabled(p) || kprobes_all_disarmed))
620 return;
621
622 	/* Neither break_handler nor post_handler is supported. */
623 if (p->break_handler || p->post_handler)
624 return;
625
626 op = container_of(p, struct optimized_kprobe, kp);
627
628 	/* Check that there are no other kprobes at the optimized instructions */
629 if (arch_check_optimized_kprobe(op) < 0)
630 return;
631
632 /* Check if it is already optimized. */
633 if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
634 return;
635 op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
636
637 if (!list_empty(&op->list))
638 		/* This probe is being unoptimized. Just dequeue it */
639 list_del_init(&op->list);
640 else {
641 list_add(&op->list, &optimizing_list);
642 kick_kprobe_optimizer();
643 }
644 }
645
646 /* Shortcut to direct unoptimizing */
647 static __kprobes void force_unoptimize_kprobe(struct optimized_kprobe *op)
648 {
649 get_online_cpus();
650 arch_unoptimize_kprobe(op);
651 put_online_cpus();
652 if (kprobe_disabled(&op->kp))
653 arch_disarm_kprobe(&op->kp);
654 }
655
656 /* Unoptimize a kprobe if p is optimized */
657 static __kprobes void unoptimize_kprobe(struct kprobe *p, bool force)
658 {
659 struct optimized_kprobe *op;
660
661 if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
662 		return; /* This is neither an optprobe nor an optimized probe */
663
664 op = container_of(p, struct optimized_kprobe, kp);
665 if (!kprobe_optimized(p)) {
666 /* Unoptimized or unoptimizing case */
667 if (force && !list_empty(&op->list)) {
668 /*
669 			 * Only if this is an unoptimizing kprobe and it is forced,
670 			 * forcibly unoptimize it. (There is no need to unoptimize
671 			 * an already-unoptimized kprobe again :)
672 */
673 list_del_init(&op->list);
674 force_unoptimize_kprobe(op);
675 }
676 return;
677 }
678
679 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
680 if (!list_empty(&op->list)) {
681 /* Dequeue from the optimization queue */
682 list_del_init(&op->list);
683 return;
684 }
685 /* Optimized kprobe case */
686 if (force)
687 /* Forcibly update the code: this is a special case */
688 force_unoptimize_kprobe(op);
689 else {
690 list_add(&op->list, &unoptimizing_list);
691 kick_kprobe_optimizer();
692 }
693 }
694
695 /* Cancel unoptimizing for reuse */
696 static void reuse_unused_kprobe(struct kprobe *ap)
697 {
698 struct optimized_kprobe *op;
699
700 BUG_ON(!kprobe_unused(ap));
701 /*
702 	 * An unused kprobe MUST be in the middle of delayed unoptimizing (which
703 	 * means there is still a relative jump in place) and disabled.
704 */
705 op = container_of(ap, struct optimized_kprobe, kp);
706 if (unlikely(list_empty(&op->list)))
707 printk(KERN_WARNING "Warning: found a stray unused "
708 "aggrprobe@%p\n", ap->addr);
709 /* Enable the probe again */
710 ap->flags &= ~KPROBE_FLAG_DISABLED;
711 /* Optimize it again (remove from op->list) */
712 BUG_ON(!kprobe_optready(ap));
713 optimize_kprobe(ap);
714 }
715
716 /* Remove optimized instructions */
717 static void __kprobes kill_optimized_kprobe(struct kprobe *p)
718 {
719 struct optimized_kprobe *op;
720
721 op = container_of(p, struct optimized_kprobe, kp);
722 if (!list_empty(&op->list))
723 /* Dequeue from the (un)optimization queue */
724 list_del_init(&op->list);
725 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
726
727 if (kprobe_unused(p)) {
728 /* Enqueue if it is unused */
729 list_add(&op->list, &freeing_list);
730 /*
731 * Remove unused probes from the hash list. After waiting
732 * for synchronization, this probe is reclaimed.
733 * (reclaiming is done by do_free_cleaned_kprobes().)
734 */
735 hlist_del_rcu(&op->kp.hlist);
736 }
737
738 /* Don't touch the code, because it is already freed. */
739 arch_remove_optimized_kprobe(op);
740 }
741
742 /* Try to prepare optimized instructions */
743 static __kprobes void prepare_optimized_kprobe(struct kprobe *p)
744 {
745 struct optimized_kprobe *op;
746
747 op = container_of(p, struct optimized_kprobe, kp);
748 arch_prepare_optimized_kprobe(op);
749 }
750
751 /* Allocate new optimized_kprobe and try to prepare optimized instructions */
752 static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
753 {
754 struct optimized_kprobe *op;
755
756 op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
757 if (!op)
758 return NULL;
759
760 INIT_LIST_HEAD(&op->list);
761 op->kp.addr = p->addr;
762 arch_prepare_optimized_kprobe(op);
763
764 return &op->kp;
765 }
766
767 static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
768
769 /*
770 * Prepare an optimized_kprobe and optimize it
771 * NOTE: p must be a normal registered kprobe
772 */
773 static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
774 {
775 struct kprobe *ap;
776 struct optimized_kprobe *op;
777
778 /* Impossible to optimize ftrace-based kprobe */
779 if (kprobe_ftrace(p))
780 return;
781
782 /* For preparing optimization, jump_label_text_reserved() is called */
783 jump_label_lock();
784 mutex_lock(&text_mutex);
785
786 ap = alloc_aggr_kprobe(p);
787 if (!ap)
788 goto out;
789
790 op = container_of(ap, struct optimized_kprobe, kp);
791 if (!arch_prepared_optinsn(&op->optinsn)) {
792 		/* If we failed to set up optimizing, fall back to a plain kprobe */
793 arch_remove_optimized_kprobe(op);
794 kfree(op);
795 goto out;
796 }
797
798 init_aggr_kprobe(ap, p);
799 optimize_kprobe(ap); /* This just kicks optimizer thread */
800
801 out:
802 mutex_unlock(&text_mutex);
803 jump_label_unlock();
804 }
805
806 #ifdef CONFIG_SYSCTL
807 static void __kprobes optimize_all_kprobes(void)
808 {
809 struct hlist_head *head;
810 struct kprobe *p;
811 unsigned int i;
812
813 mutex_lock(&kprobe_mutex);
814 /* If optimization is already allowed, just return */
815 if (kprobes_allow_optimization)
816 goto out;
817
818 kprobes_allow_optimization = true;
819 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
820 head = &kprobe_table[i];
821 hlist_for_each_entry_rcu(p, head, hlist)
822 if (!kprobe_disabled(p))
823 optimize_kprobe(p);
824 }
825 printk(KERN_INFO "Kprobes globally optimized\n");
826 out:
827 mutex_unlock(&kprobe_mutex);
828 }
829
830 static void __kprobes unoptimize_all_kprobes(void)
831 {
832 struct hlist_head *head;
833 struct kprobe *p;
834 unsigned int i;
835
836 mutex_lock(&kprobe_mutex);
837 /* If optimization is already prohibited, just return */
838 if (!kprobes_allow_optimization) {
839 mutex_unlock(&kprobe_mutex);
840 return;
841 }
842
843 kprobes_allow_optimization = false;
844 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
845 head = &kprobe_table[i];
846 hlist_for_each_entry_rcu(p, head, hlist) {
847 if (!kprobe_disabled(p))
848 unoptimize_kprobe(p, false);
849 }
850 }
851 mutex_unlock(&kprobe_mutex);
852
853 /* Wait for unoptimizing completion */
854 wait_for_kprobe_optimizer();
855 printk(KERN_INFO "Kprobes globally unoptimized\n");
856 }
857
858 static DEFINE_MUTEX(kprobe_sysctl_mutex);
859 int sysctl_kprobes_optimization;
860 int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
861 void __user *buffer, size_t *length,
862 loff_t *ppos)
863 {
864 int ret;
865
866 mutex_lock(&kprobe_sysctl_mutex);
867 sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
868 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
869
870 if (sysctl_kprobes_optimization)
871 optimize_all_kprobes();
872 else
873 unoptimize_all_kprobes();
874 mutex_unlock(&kprobe_sysctl_mutex);
875
876 return ret;
877 }
878 #endif /* CONFIG_SYSCTL */
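
/*
 * Note (illustrative): with CONFIG_SYSCTL, the handler above is typically
 * wired up as the debug.kprobes-optimization sysctl in kernel/sysctl.c, so
 * e.g. "echo 0 > /proc/sys/debug/kprobes-optimization" falls back to plain
 * breakpoint-based kprobes system-wide (assuming that sysctl registration).
 */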
879
880 /* Put a breakpoint for a probe. Must be called with text_mutex locked */
881 static void __kprobes __arm_kprobe(struct kprobe *p)
882 {
883 struct kprobe *_p;
884
885 /* Check collision with other optimized kprobes */
886 _p = get_optimized_kprobe((unsigned long)p->addr);
887 if (unlikely(_p))
888 /* Fallback to unoptimized kprobe */
889 unoptimize_kprobe(_p, true);
890
891 arch_arm_kprobe(p);
892 optimize_kprobe(p); /* Try to optimize (add kprobe to a list) */
893 }
894
895 /* Remove the breakpoint of a probe. Must be called with text_mutex locked */
896 static void __kprobes __disarm_kprobe(struct kprobe *p, bool reopt)
897 {
898 struct kprobe *_p;
899
900 unoptimize_kprobe(p, false); /* Try to unoptimize */
901
902 if (!kprobe_queued(p)) {
903 arch_disarm_kprobe(p);
904 /* If another kprobe was blocked, optimize it. */
905 _p = get_optimized_kprobe((unsigned long)p->addr);
906 if (unlikely(_p) && reopt)
907 optimize_kprobe(_p);
908 }
909 	/* TODO: reoptimize others after unoptimizing this probe */
910 }
911
912 #else /* !CONFIG_OPTPROBES */
913
914 #define optimize_kprobe(p) do {} while (0)
915 #define unoptimize_kprobe(p, f) do {} while (0)
916 #define kill_optimized_kprobe(p) do {} while (0)
917 #define prepare_optimized_kprobe(p) do {} while (0)
918 #define try_to_optimize_kprobe(p) do {} while (0)
919 #define __arm_kprobe(p) arch_arm_kprobe(p)
920 #define __disarm_kprobe(p, o) arch_disarm_kprobe(p)
921 #define kprobe_disarmed(p) kprobe_disabled(p)
922 #define wait_for_kprobe_optimizer() do {} while (0)
923
924 /* There should be no unused kprobes; they cannot be reused without optimization */
925 static void reuse_unused_kprobe(struct kprobe *ap)
926 {
927 printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
928 BUG_ON(kprobe_unused(ap));
929 }
930
931 static __kprobes void free_aggr_kprobe(struct kprobe *p)
932 {
933 arch_remove_kprobe(p);
934 kfree(p);
935 }
936
937 static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
938 {
939 return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
940 }
941 #endif /* CONFIG_OPTPROBES */
942
943 #ifdef CONFIG_KPROBES_ON_FTRACE
944 static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
945 .func = kprobe_ftrace_handler,
946 .flags = FTRACE_OPS_FL_SAVE_REGS,
947 };
948 static int kprobe_ftrace_enabled;
949
950 /* Must ensure p->addr is really on ftrace */
951 static int __kprobes prepare_kprobe(struct kprobe *p)
952 {
953 if (!kprobe_ftrace(p))
954 return arch_prepare_kprobe(p);
955
956 return arch_prepare_kprobe_ftrace(p);
957 }
958
959 /* Caller must lock kprobe_mutex */
960 static void __kprobes arm_kprobe_ftrace(struct kprobe *p)
961 {
962 int ret;
963
964 ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
965 (unsigned long)p->addr, 0, 0);
966 WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
967 kprobe_ftrace_enabled++;
968 if (kprobe_ftrace_enabled == 1) {
969 ret = register_ftrace_function(&kprobe_ftrace_ops);
970 WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
971 }
972 }
973
974 /* Caller must lock kprobe_mutex */
975 static void __kprobes disarm_kprobe_ftrace(struct kprobe *p)
976 {
977 int ret;
978
979 kprobe_ftrace_enabled--;
980 if (kprobe_ftrace_enabled == 0) {
981 ret = unregister_ftrace_function(&kprobe_ftrace_ops);
982 		WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret);
983 }
984 ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
985 (unsigned long)p->addr, 1, 0);
986 WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
987 }
988 #else /* !CONFIG_KPROBES_ON_FTRACE */
989 #define prepare_kprobe(p) arch_prepare_kprobe(p)
990 #define arm_kprobe_ftrace(p) do {} while (0)
991 #define disarm_kprobe_ftrace(p) do {} while (0)
992 #endif
993
994 /* Arm a kprobe with text_mutex */
995 static void __kprobes arm_kprobe(struct kprobe *kp)
996 {
997 if (unlikely(kprobe_ftrace(kp))) {
998 arm_kprobe_ftrace(kp);
999 return;
1000 }
1001 /*
1002 * Here, since __arm_kprobe() doesn't use stop_machine(),
1003 * this doesn't cause deadlock on text_mutex. So, we don't
1004 * need get_online_cpus().
1005 */
1006 mutex_lock(&text_mutex);
1007 __arm_kprobe(kp);
1008 mutex_unlock(&text_mutex);
1009 }
1010
1011 /* Disarm a kprobe with text_mutex */
1012 static void __kprobes disarm_kprobe(struct kprobe *kp, bool reopt)
1013 {
1014 if (unlikely(kprobe_ftrace(kp))) {
1015 disarm_kprobe_ftrace(kp);
1016 return;
1017 }
1018 /* Ditto */
1019 mutex_lock(&text_mutex);
1020 __disarm_kprobe(kp, reopt);
1021 mutex_unlock(&text_mutex);
1022 }
1023
1024 /*
1025 * Aggregate handlers for multiple kprobes support - these handlers
1026 * take care of invoking the individual kprobe handlers on p->list
1027 */
1028 static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
1029 {
1030 struct kprobe *kp;
1031
1032 list_for_each_entry_rcu(kp, &p->list, list) {
1033 if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
1034 set_kprobe_instance(kp);
1035 if (kp->pre_handler(kp, regs))
1036 return 1;
1037 }
1038 reset_kprobe_instance();
1039 }
1040 return 0;
1041 }
1042
1043 static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
1044 unsigned long flags)
1045 {
1046 struct kprobe *kp;
1047
1048 list_for_each_entry_rcu(kp, &p->list, list) {
1049 if (kp->post_handler && likely(!kprobe_disabled(kp))) {
1050 set_kprobe_instance(kp);
1051 kp->post_handler(kp, regs, flags);
1052 reset_kprobe_instance();
1053 }
1054 }
1055 }
1056
1057 static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
1058 int trapnr)
1059 {
1060 struct kprobe *cur = __this_cpu_read(kprobe_instance);
1061
1062 /*
1063 	 * If we faulted "during" the execution of a user-specified
1064 	 * probe handler, invoke just that probe's fault handler
1065 */
1066 if (cur && cur->fault_handler) {
1067 if (cur->fault_handler(cur, regs, trapnr))
1068 return 1;
1069 }
1070 return 0;
1071 }
1072
1073 static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
1074 {
1075 struct kprobe *cur = __this_cpu_read(kprobe_instance);
1076 int ret = 0;
1077
1078 if (cur && cur->break_handler) {
1079 if (cur->break_handler(cur, regs))
1080 ret = 1;
1081 }
1082 reset_kprobe_instance();
1083 return ret;
1084 }
1085
1086 /* Walks the list and increments nmissed count for multiprobe case */
1087 void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
1088 {
1089 struct kprobe *kp;
1090 if (!kprobe_aggrprobe(p)) {
1091 p->nmissed++;
1092 } else {
1093 list_for_each_entry_rcu(kp, &p->list, list)
1094 kp->nmissed++;
1095 }
1096 return;
1097 }
1098
1099 void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
1100 struct hlist_head *head)
1101 {
1102 struct kretprobe *rp = ri->rp;
1103
1104 	/* remove the rp inst from the kretprobe_inst_table */
1105 hlist_del(&ri->hlist);
1106 INIT_HLIST_NODE(&ri->hlist);
1107 if (likely(rp)) {
1108 raw_spin_lock(&rp->lock);
1109 hlist_add_head(&ri->hlist, &rp->free_instances);
1110 raw_spin_unlock(&rp->lock);
1111 } else
1112 /* Unregistering */
1113 hlist_add_head(&ri->hlist, head);
1114 }
1115
1116 void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
1117 struct hlist_head **head, unsigned long *flags)
1118 __acquires(hlist_lock)
1119 {
1120 unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
1121 raw_spinlock_t *hlist_lock;
1122
1123 *head = &kretprobe_inst_table[hash];
1124 hlist_lock = kretprobe_table_lock_ptr(hash);
1125 raw_spin_lock_irqsave(hlist_lock, *flags);
1126 }
1127
1128 static void __kprobes kretprobe_table_lock(unsigned long hash,
1129 unsigned long *flags)
1130 __acquires(hlist_lock)
1131 {
1132 raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
1133 raw_spin_lock_irqsave(hlist_lock, *flags);
1134 }
1135
1136 void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
1137 unsigned long *flags)
1138 __releases(hlist_lock)
1139 {
1140 unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
1141 raw_spinlock_t *hlist_lock;
1142
1143 hlist_lock = kretprobe_table_lock_ptr(hash);
1144 raw_spin_unlock_irqrestore(hlist_lock, *flags);
1145 }
1146
1147 static void __kprobes kretprobe_table_unlock(unsigned long hash,
1148 unsigned long *flags)
1149 __releases(hlist_lock)
1150 {
1151 raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
1152 raw_spin_unlock_irqrestore(hlist_lock, *flags);
1153 }
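
/*
 * Locking scheme note (descriptive, added for clarity): kretprobe instances
 * are hashed by the probed task, and each bucket of kretprobe_inst_table has
 * its own raw spinlock in kretprobe_table_locks. Callers take the lock either
 * by task (kretprobe_hash_lock) or by a precomputed hash
 * (kretprobe_table_lock), so different tasks rarely contend on one bucket.
 */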
1154
1155 /*
1156 * This function is called from finish_task_switch when task tk becomes dead,
1157 * so that we can recycle any function-return probe instances associated
1158 * with this task. These left over instances represent probed functions
1159 * that have been called but will never return.
1160 */
1161 void __kprobes kprobe_flush_task(struct task_struct *tk)
1162 {
1163 struct kretprobe_instance *ri;
1164 struct hlist_head *head, empty_rp;
1165 struct hlist_node *tmp;
1166 unsigned long hash, flags = 0;
1167
1168 if (unlikely(!kprobes_initialized))
1169 /* Early boot. kretprobe_table_locks not yet initialized. */
1170 return;
1171
1172 INIT_HLIST_HEAD(&empty_rp);
1173 hash = hash_ptr(tk, KPROBE_HASH_BITS);
1174 head = &kretprobe_inst_table[hash];
1175 kretprobe_table_lock(hash, &flags);
1176 hlist_for_each_entry_safe(ri, tmp, head, hlist) {
1177 if (ri->task == tk)
1178 recycle_rp_inst(ri, &empty_rp);
1179 }
1180 kretprobe_table_unlock(hash, &flags);
1181 hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
1182 hlist_del(&ri->hlist);
1183 kfree(ri);
1184 }
1185 }
1186
1187 static inline void free_rp_inst(struct kretprobe *rp)
1188 {
1189 struct kretprobe_instance *ri;
1190 struct hlist_node *next;
1191
1192 hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
1193 hlist_del(&ri->hlist);
1194 kfree(ri);
1195 }
1196 }
1197
1198 static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
1199 {
1200 unsigned long flags, hash;
1201 struct kretprobe_instance *ri;
1202 struct hlist_node *next;
1203 struct hlist_head *head;
1204
1205 /* No race here */
1206 for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
1207 kretprobe_table_lock(hash, &flags);
1208 head = &kretprobe_inst_table[hash];
1209 hlist_for_each_entry_safe(ri, next, head, hlist) {
1210 if (ri->rp == rp)
1211 ri->rp = NULL;
1212 }
1213 kretprobe_table_unlock(hash, &flags);
1214 }
1215 free_rp_inst(rp);
1216 }
1217
1218 /*
1219 * Add the new probe to ap->list. Fail if this is the
1220 * second jprobe at the address - two jprobes can't coexist
1221 */
1222 static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
1223 {
1224 BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
1225
1226 if (p->break_handler || p->post_handler)
1227 unoptimize_kprobe(ap, true); /* Fall back to normal kprobe */
1228
1229 if (p->break_handler) {
1230 if (ap->break_handler)
1231 return -EEXIST;
1232 list_add_tail_rcu(&p->list, &ap->list);
1233 ap->break_handler = aggr_break_handler;
1234 } else
1235 list_add_rcu(&p->list, &ap->list);
1236 if (p->post_handler && !ap->post_handler)
1237 ap->post_handler = aggr_post_handler;
1238
1239 return 0;
1240 }
1241
1242 /*
1243 * Fill in the required fields of the "manager kprobe". Replace the
1244 * earlier kprobe in the hlist with the manager kprobe
1245 */
1246 static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
1247 {
1248 /* Copy p's insn slot to ap */
1249 copy_kprobe(p, ap);
1250 flush_insn_slot(ap);
1251 ap->addr = p->addr;
1252 ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
1253 ap->pre_handler = aggr_pre_handler;
1254 ap->fault_handler = aggr_fault_handler;
1255 	/* We don't care about a kprobe which has gone. */
1256 if (p->post_handler && !kprobe_gone(p))
1257 ap->post_handler = aggr_post_handler;
1258 if (p->break_handler && !kprobe_gone(p))
1259 ap->break_handler = aggr_break_handler;
1260
1261 INIT_LIST_HEAD(&ap->list);
1262 INIT_HLIST_NODE(&ap->hlist);
1263
1264 list_add_rcu(&p->list, &ap->list);
1265 hlist_replace_rcu(&p->hlist, &ap->hlist);
1266 }
1267
1268 /*
1269 * This is the second or subsequent kprobe at the address - handle
1270 * the intricacies
1271 */
1272 static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
1273 struct kprobe *p)
1274 {
1275 int ret = 0;
1276 struct kprobe *ap = orig_p;
1277
1278 /* For preparing optimization, jump_label_text_reserved() is called */
1279 jump_label_lock();
1280 /*
1281 	 * Get online CPUs to avoid a text_mutex deadlock with stop_machine,
1282 	 * which is invoked by unoptimize_kprobe() in add_new_kprobe().
1283 */
1284 get_online_cpus();
1285 mutex_lock(&text_mutex);
1286
1287 if (!kprobe_aggrprobe(orig_p)) {
1288 /* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
1289 ap = alloc_aggr_kprobe(orig_p);
1290 if (!ap) {
1291 ret = -ENOMEM;
1292 goto out;
1293 }
1294 init_aggr_kprobe(ap, orig_p);
1295 } else if (kprobe_unused(ap))
1296 /* This probe is going to die. Rescue it */
1297 reuse_unused_kprobe(ap);
1298
1299 if (kprobe_gone(ap)) {
1300 /*
1301 		 * We are attempting to insert a new probe at the same location that
1302 		 * had a probe in the module vaddr area which has already been
1303 		 * freed. So the instruction slot has already been
1304 * released. We need a new slot for the new probe.
1305 */
1306 ret = arch_prepare_kprobe(ap);
1307 if (ret)
1308 /*
1309 			 * Even if we fail to allocate a new slot, we don't need to
1310 			 * free the aggr_kprobe. It will be used next time, or
1311 			 * freed by unregister_kprobe().
1312 */
1313 goto out;
1314
1315 /* Prepare optimized instructions if possible. */
1316 prepare_optimized_kprobe(ap);
1317
1318 /*
1319 * Clear gone flag to prevent allocating new slot again, and
1320 * set disabled flag because it is not armed yet.
1321 */
1322 ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
1323 | KPROBE_FLAG_DISABLED;
1324 }
1325
1326 /* Copy ap's insn slot to p */
1327 copy_kprobe(ap, p);
1328 ret = add_new_kprobe(ap, p);
1329
1330 out:
1331 mutex_unlock(&text_mutex);
1332 put_online_cpus();
1333 jump_label_unlock();
1334
1335 if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
1336 ap->flags &= ~KPROBE_FLAG_DISABLED;
1337 if (!kprobes_all_disarmed)
1338 /* Arm the breakpoint again. */
1339 arm_kprobe(ap);
1340 }
1341 return ret;
1342 }
1343
1344 static int __kprobes in_kprobes_functions(unsigned long addr)
1345 {
1346 struct kprobe_blackpoint *kb;
1347
1348 if (addr >= (unsigned long)__kprobes_text_start &&
1349 addr < (unsigned long)__kprobes_text_end)
1350 return -EINVAL;
1351 /*
1352 * If there exists a kprobe_blacklist, verify and
1353 * fail any probe registration in the prohibited area
1354 */
1355 for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
1356 if (kb->start_addr) {
1357 if (addr >= kb->start_addr &&
1358 addr < (kb->start_addr + kb->range))
1359 return -EINVAL;
1360 }
1361 }
1362 return 0;
1363 }
1364
1365 /*
1366 * If we have a symbol_name argument, look it up and add the offset field
1367 * to it. This way, we can specify a relative address to a symbol.
1368  * This returns an encoded error if it fails to look up the symbol or if
1369  * an invalid combination of parameters is given.
1370 */
1371 static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
1372 {
1373 kprobe_opcode_t *addr = p->addr;
1374
1375 if ((p->symbol_name && p->addr) ||
1376 (!p->symbol_name && !p->addr))
1377 goto invalid;
1378
1379 if (p->symbol_name) {
1380 kprobe_lookup_name(p->symbol_name, addr);
1381 if (!addr)
1382 return ERR_PTR(-ENOENT);
1383 }
1384
1385 addr = (kprobe_opcode_t *)(((char *)addr) + p->offset);
1386 if (addr)
1387 return addr;
1388
1389 invalid:
1390 return ERR_PTR(-EINVAL);
1391 }
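
/*
 * Illustrative example (hypothetical values, not from this file): a caller
 * may give either an explicit address or a symbol plus offset, e.g.
 *
 *	struct kprobe kp = {
 *		.symbol_name	= "vfs_read",
 *		.offset		= 0x10,
 *	};
 *
 * in which case kprobe_addr() resolves "vfs_read" via kprobe_lookup_name()
 * and returns that address plus 0x10.
 */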
1392
1393 /* Check passed kprobe is valid and return kprobe in kprobe_table. */
1394 static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
1395 {
1396 struct kprobe *ap, *list_p;
1397
1398 ap = get_kprobe(p->addr);
1399 if (unlikely(!ap))
1400 return NULL;
1401
1402 if (p != ap) {
1403 list_for_each_entry_rcu(list_p, &ap->list, list)
1404 if (list_p == p)
1405 /* kprobe p is a valid probe */
1406 goto valid;
1407 return NULL;
1408 }
1409 valid:
1410 return ap;
1411 }
1412
1413 /* Return error if the kprobe is being re-registered */
1414 static inline int check_kprobe_rereg(struct kprobe *p)
1415 {
1416 int ret = 0;
1417
1418 mutex_lock(&kprobe_mutex);
1419 if (__get_valid_kprobe(p))
1420 ret = -EINVAL;
1421 mutex_unlock(&kprobe_mutex);
1422
1423 return ret;
1424 }
1425
1426 static __kprobes int check_kprobe_address_safe(struct kprobe *p,
1427 struct module **probed_mod)
1428 {
1429 int ret = 0;
1430 unsigned long ftrace_addr;
1431
1432 /*
1433 * If the address is located on a ftrace nop, set the
1434 * breakpoint to the following instruction.
1435 */
1436 ftrace_addr = ftrace_location((unsigned long)p->addr);
1437 if (ftrace_addr) {
1438 #ifdef CONFIG_KPROBES_ON_FTRACE
1439 /* Given address is not on the instruction boundary */
1440 if ((unsigned long)p->addr != ftrace_addr)
1441 return -EILSEQ;
1442 p->flags |= KPROBE_FLAG_FTRACE;
1443 #else /* !CONFIG_KPROBES_ON_FTRACE */
1444 return -EINVAL;
1445 #endif
1446 }
1447
1448 jump_label_lock();
1449 preempt_disable();
1450
1451 /* Ensure it is not in reserved area nor out of text */
1452 if (!kernel_text_address((unsigned long) p->addr) ||
1453 in_kprobes_functions((unsigned long) p->addr) ||
1454 jump_label_text_reserved(p->addr, p->addr)) {
1455 ret = -EINVAL;
1456 goto out;
1457 }
1458
1459 	/* Check if we are probing a module */
1460 *probed_mod = __module_text_address((unsigned long) p->addr);
1461 if (*probed_mod) {
1462 /*
1463 * We must hold a refcount of the probed module while updating
1464 * its code to prohibit unexpected unloading.
1465 */
1466 if (unlikely(!try_module_get(*probed_mod))) {
1467 ret = -ENOENT;
1468 goto out;
1469 }
1470
1471 /*
1472 		 * If the module has freed .init.text, we can't insert
1473 		 * kprobes there.
1474 */
1475 if (within_module_init((unsigned long)p->addr, *probed_mod) &&
1476 (*probed_mod)->state != MODULE_STATE_COMING) {
1477 module_put(*probed_mod);
1478 *probed_mod = NULL;
1479 ret = -ENOENT;
1480 }
1481 }
1482 out:
1483 preempt_enable();
1484 jump_label_unlock();
1485
1486 return ret;
1487 }
1488
1489 int __kprobes register_kprobe(struct kprobe *p)
1490 {
1491 int ret;
1492 struct kprobe *old_p;
1493 struct module *probed_mod;
1494 kprobe_opcode_t *addr;
1495
1496 /* Adjust probe address from symbol */
1497 addr = kprobe_addr(p);
1498 if (IS_ERR(addr))
1499 return PTR_ERR(addr);
1500 p->addr = addr;
1501
1502 ret = check_kprobe_rereg(p);
1503 if (ret)
1504 return ret;
1505
1506 /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
1507 p->flags &= KPROBE_FLAG_DISABLED;
1508 p->nmissed = 0;
1509 INIT_LIST_HEAD(&p->list);
1510
1511 ret = check_kprobe_address_safe(p, &probed_mod);
1512 if (ret)
1513 return ret;
1514
1515 mutex_lock(&kprobe_mutex);
1516
1517 old_p = get_kprobe(p->addr);
1518 if (old_p) {
1519 /* Since this may unoptimize old_p, locking text_mutex. */
1520 ret = register_aggr_kprobe(old_p, p);
1521 goto out;
1522 }
1523
1524 mutex_lock(&text_mutex); /* Avoiding text modification */
1525 ret = prepare_kprobe(p);
1526 mutex_unlock(&text_mutex);
1527 if (ret)
1528 goto out;
1529
1530 INIT_HLIST_NODE(&p->hlist);
1531 hlist_add_head_rcu(&p->hlist,
1532 &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
1533
1534 if (!kprobes_all_disarmed && !kprobe_disabled(p))
1535 arm_kprobe(p);
1536
1537 /* Try to optimize kprobe */
1538 try_to_optimize_kprobe(p);
1539
1540 out:
1541 mutex_unlock(&kprobe_mutex);
1542
1543 if (probed_mod)
1544 module_put(probed_mod);
1545
1546 return ret;
1547 }
1548 EXPORT_SYMBOL_GPL(register_kprobe);
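
/*
 * Minimal usage sketch (illustrative only; the handler name below is
 * hypothetical, and returning 0 from the pre-handler lets the probed
 * instruction execute normally):
 *
 *	static int my_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		return 0;
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= my_pre,
 *	};
 *
 *	ret = register_kprobe(&my_kp);
 *	...
 *	unregister_kprobe(&my_kp);
 */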
1549
1550 /* Check if all probes on the aggrprobe are disabled */
1551 static int __kprobes aggr_kprobe_disabled(struct kprobe *ap)
1552 {
1553 struct kprobe *kp;
1554
1555 list_for_each_entry_rcu(kp, &ap->list, list)
1556 if (!kprobe_disabled(kp))
1557 /*
1558 * There is an active probe on the list.
1559 * We can't disable this ap.
1560 */
1561 return 0;
1562
1563 return 1;
1564 }
1565
1566 /* Disable one kprobe: Must be called with kprobe_mutex held */
1567 static struct kprobe *__kprobes __disable_kprobe(struct kprobe *p)
1568 {
1569 struct kprobe *orig_p;
1570
1571 /* Get an original kprobe for return */
1572 orig_p = __get_valid_kprobe(p);
1573 if (unlikely(orig_p == NULL))
1574 return NULL;
1575
1576 if (!kprobe_disabled(p)) {
1577 /* Disable probe if it is a child probe */
1578 if (p != orig_p)
1579 p->flags |= KPROBE_FLAG_DISABLED;
1580
1581 /* Try to disarm and disable this/parent probe */
1582 if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
1583 disarm_kprobe(orig_p, true);
1584 orig_p->flags |= KPROBE_FLAG_DISABLED;
1585 }
1586 }
1587
1588 return orig_p;
1589 }
1590
1591 /*
1592 * Unregister a kprobe without a scheduler synchronization.
1593 */
1594 static int __kprobes __unregister_kprobe_top(struct kprobe *p)
1595 {
1596 struct kprobe *ap, *list_p;
1597
1598 /* Disable kprobe. This will disarm it if needed. */
1599 ap = __disable_kprobe(p);
1600 if (ap == NULL)
1601 return -EINVAL;
1602
1603 if (ap == p)
1604 /*
1605 		 * This probe is an independent (and non-optimized) kprobe
1606 * (not an aggrprobe). Remove from the hash list.
1607 */
1608 goto disarmed;
1609
1610 	/* The following process expects this probe to be an aggrprobe */
1611 WARN_ON(!kprobe_aggrprobe(ap));
1612
1613 if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
1614 /*
1615 		 * !disarmed could happen if the probe is under delayed
1616 * unoptimizing.
1617 */
1618 goto disarmed;
1619 else {
1620 		/* If the probe being disabled has special handlers, update the aggrprobe */
1621 if (p->break_handler && !kprobe_gone(p))
1622 ap->break_handler = NULL;
1623 if (p->post_handler && !kprobe_gone(p)) {
1624 list_for_each_entry_rcu(list_p, &ap->list, list) {
1625 if ((list_p != p) && (list_p->post_handler))
1626 goto noclean;
1627 }
1628 ap->post_handler = NULL;
1629 }
1630 noclean:
1631 /*
1632 * Remove from the aggrprobe: this path will do nothing in
1633 * __unregister_kprobe_bottom().
1634 */
1635 list_del_rcu(&p->list);
1636 if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
1637 /*
1638 * Try to optimize this probe again, because post
1639 * handler may have been changed.
1640 */
1641 optimize_kprobe(ap);
1642 }
1643 return 0;
1644
1645 disarmed:
1646 BUG_ON(!kprobe_disarmed(ap));
1647 hlist_del_rcu(&ap->hlist);
1648 return 0;
1649 }
1650
1651 static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
1652 {
1653 struct kprobe *ap;
1654
1655 if (list_empty(&p->list))
1656 /* This is an independent kprobe */
1657 arch_remove_kprobe(p);
1658 else if (list_is_singular(&p->list)) {
1659 /* This is the last child of an aggrprobe */
1660 ap = list_entry(p->list.next, struct kprobe, list);
1661 list_del(&p->list);
1662 free_aggr_kprobe(ap);
1663 }
1664 /* Otherwise, do nothing. */
1665 }
1666
1667 int __kprobes register_kprobes(struct kprobe **kps, int num)
1668 {
1669 int i, ret = 0;
1670
1671 if (num <= 0)
1672 return -EINVAL;
1673 for (i = 0; i < num; i++) {
1674 ret = register_kprobe(kps[i]);
1675 if (ret < 0) {
1676 if (i > 0)
1677 unregister_kprobes(kps, i);
1678 break;
1679 }
1680 }
1681 return ret;
1682 }
1683 EXPORT_SYMBOL_GPL(register_kprobes);
1684
1685 void __kprobes unregister_kprobe(struct kprobe *p)
1686 {
1687 unregister_kprobes(&p, 1);
1688 }
1689 EXPORT_SYMBOL_GPL(unregister_kprobe);
1690
1691 void __kprobes unregister_kprobes(struct kprobe **kps, int num)
1692 {
1693 int i;
1694
1695 if (num <= 0)
1696 return;
1697 mutex_lock(&kprobe_mutex);
1698 for (i = 0; i < num; i++)
1699 if (__unregister_kprobe_top(kps[i]) < 0)
1700 kps[i]->addr = NULL;
1701 mutex_unlock(&kprobe_mutex);
1702
1703 synchronize_sched();
1704 for (i = 0; i < num; i++)
1705 if (kps[i]->addr)
1706 __unregister_kprobe_bottom(kps[i]);
1707 }
1708 EXPORT_SYMBOL_GPL(unregister_kprobes);
1709
1710 static struct notifier_block kprobe_exceptions_nb = {
1711 .notifier_call = kprobe_exceptions_notify,
1712 .priority = 0x7fffffff /* we need to be notified first */
1713 };
1714
1715 unsigned long __weak arch_deref_entry_point(void *entry)
1716 {
1717 return (unsigned long)entry;
1718 }
1719
1720 int __kprobes register_jprobes(struct jprobe **jps, int num)
1721 {
1722 struct jprobe *jp;
1723 int ret = 0, i;
1724
1725 if (num <= 0)
1726 return -EINVAL;
1727 for (i = 0; i < num; i++) {
1728 unsigned long addr, offset;
1729 jp = jps[i];
1730 addr = arch_deref_entry_point(jp->entry);
1731
1732 /* Verify probepoint is a function entry point */
1733 if (kallsyms_lookup_size_offset(addr, NULL, &offset) &&
1734 offset == 0) {
1735 jp->kp.pre_handler = setjmp_pre_handler;
1736 jp->kp.break_handler = longjmp_break_handler;
1737 ret = register_kprobe(&jp->kp);
1738 } else
1739 ret = -EINVAL;
1740
1741 if (ret < 0) {
1742 if (i > 0)
1743 unregister_jprobes(jps, i);
1744 break;
1745 }
1746 }
1747 return ret;
1748 }
1749 EXPORT_SYMBOL_GPL(register_jprobes);
1750
1751 int __kprobes register_jprobe(struct jprobe *jp)
1752 {
1753 return register_jprobes(&jp, 1);
1754 }
1755 EXPORT_SYMBOL_GPL(register_jprobe);
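
/*
 * Usage sketch for jprobes (illustrative; all names below are hypothetical):
 * the .entry handler must share the probed function's prototype and must end
 * by calling jprobe_return(), e.g. to mirror a function long my_target(long arg):
 *
 *	static long my_target_mirror(long arg)
 *	{
 *		jprobe_return();
 *		return 0;
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry		= my_target_mirror,
 *		.kp.symbol_name	= "my_target",
 *	};
 *
 *	register_jprobe(&my_jp);
 */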
1756
1757 void __kprobes unregister_jprobe(struct jprobe *jp)
1758 {
1759 unregister_jprobes(&jp, 1);
1760 }
1761 EXPORT_SYMBOL_GPL(unregister_jprobe);
1762
1763 void __kprobes unregister_jprobes(struct jprobe **jps, int num)
1764 {
1765 int i;
1766
1767 if (num <= 0)
1768 return;
1769 mutex_lock(&kprobe_mutex);
1770 for (i = 0; i < num; i++)
1771 if (__unregister_kprobe_top(&jps[i]->kp) < 0)
1772 jps[i]->kp.addr = NULL;
1773 mutex_unlock(&kprobe_mutex);
1774
1775 synchronize_sched();
1776 for (i = 0; i < num; i++) {
1777 if (jps[i]->kp.addr)
1778 __unregister_kprobe_bottom(&jps[i]->kp);
1779 }
1780 }
1781 EXPORT_SYMBOL_GPL(unregister_jprobes);
1782
1783 #ifdef CONFIG_KRETPROBES
1784 /*
1785  * This kprobe pre_handler is registered with every kretprobe. When the
1786  * probe hits, it will set up the return probe.
1787 */
1788 static int __kprobes pre_handler_kretprobe(struct kprobe *p,
1789 struct pt_regs *regs)
1790 {
1791 struct kretprobe *rp = container_of(p, struct kretprobe, kp);
1792 unsigned long hash, flags = 0;
1793 struct kretprobe_instance *ri;
1794
1795 	/* TODO: consider swapping the RA only after the last pre_handler has fired */
1796 hash = hash_ptr(current, KPROBE_HASH_BITS);
1797 raw_spin_lock_irqsave(&rp->lock, flags);
1798 if (!hlist_empty(&rp->free_instances)) {
1799 ri = hlist_entry(rp->free_instances.first,
1800 struct kretprobe_instance, hlist);
1801 hlist_del(&ri->hlist);
1802 raw_spin_unlock_irqrestore(&rp->lock, flags);
1803
1804 ri->rp = rp;
1805 ri->task = current;
1806
1807 if (rp->entry_handler && rp->entry_handler(ri, regs)) {
1808 raw_spin_lock_irqsave(&rp->lock, flags);
1809 hlist_add_head(&ri->hlist, &rp->free_instances);
1810 raw_spin_unlock_irqrestore(&rp->lock, flags);
1811 return 0;
1812 }
1813
1814 arch_prepare_kretprobe(ri, regs);
1815
1816 /* XXX(hch): why is there no hlist_move_head? */
1817 INIT_HLIST_NODE(&ri->hlist);
1818 kretprobe_table_lock(hash, &flags);
1819 hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
1820 kretprobe_table_unlock(hash, &flags);
1821 } else {
1822 rp->nmissed++;
1823 raw_spin_unlock_irqrestore(&rp->lock, flags);
1824 }
1825 return 0;
1826 }
1827
1828 int __kprobes register_kretprobe(struct kretprobe *rp)
1829 {
1830 int ret = 0;
1831 struct kretprobe_instance *inst;
1832 int i;
1833 void *addr;
1834
1835 if (kretprobe_blacklist_size) {
1836 addr = kprobe_addr(&rp->kp);
1837 if (IS_ERR(addr))
1838 return PTR_ERR(addr);
1839
1840 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
1841 if (kretprobe_blacklist[i].addr == addr)
1842 return -EINVAL;
1843 }
1844 }
1845
1846 rp->kp.pre_handler = pre_handler_kretprobe;
1847 rp->kp.post_handler = NULL;
1848 rp->kp.fault_handler = NULL;
1849 rp->kp.break_handler = NULL;
1850
1851 /* Pre-allocate memory for max kretprobe instances */
1852 if (rp->maxactive <= 0) {
1853 #ifdef CONFIG_PREEMPT
1854 rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
1855 #else
1856 rp->maxactive = num_possible_cpus();
1857 #endif
1858 }
1859 raw_spin_lock_init(&rp->lock);
1860 INIT_HLIST_HEAD(&rp->free_instances);
1861 for (i = 0; i < rp->maxactive; i++) {
1862 inst = kmalloc(sizeof(struct kretprobe_instance) +
1863 rp->data_size, GFP_KERNEL);
1864 if (inst == NULL) {
1865 free_rp_inst(rp);
1866 return -ENOMEM;
1867 }
1868 INIT_HLIST_NODE(&inst->hlist);
1869 hlist_add_head(&inst->hlist, &rp->free_instances);
1870 }
1871
1872 rp->nmissed = 0;
1873 /* Establish function entry probe point */
1874 ret = register_kprobe(&rp->kp);
1875 if (ret != 0)
1876 free_rp_inst(rp);
1877 return ret;
1878 }
1879 EXPORT_SYMBOL_GPL(register_kretprobe);
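/*
 * Illustrative usage sketch, not part of this implementation: a minimal
 * kretprobe as a module might set it up. The probed symbol "my_target" is
 * hypothetical. The entry_handler may veto the return hook for a given hit
 * by returning non-zero (see pre_handler_kretprobe() above); maxactive <= 0
 * picks the default computed in register_kretprobe().
 *
 *	#include <linux/kprobes.h>
 *	#include <linux/ptrace.h>
 *
 *	static int my_entry(struct kretprobe_instance *ri, struct pt_regs *regs)
 *	{
 *		return 0;	// 0 means: go ahead and hook the return address
 *	}
 *
 *	static int my_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
 *	{
 *		pr_info("%s returned %lu\n", ri->rp->kp.symbol_name,
 *			regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.kp.symbol_name	= "my_target",
 *		.entry_handler	= my_entry,
 *		.handler	= my_ret,
 *		.maxactive	= 20,
 *	};
 *
 *	// module init:  register_kretprobe(&my_rp);
 *	// module exit:  unregister_kretprobe(&my_rp); my_rp.nmissed counts drops
 */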
1880
1881 int __kprobes register_kretprobes(struct kretprobe **rps, int num)
1882 {
1883 int ret = 0, i;
1884
1885 if (num <= 0)
1886 return -EINVAL;
1887 for (i = 0; i < num; i++) {
1888 ret = register_kretprobe(rps[i]);
1889 if (ret < 0) {
1890 if (i > 0)
1891 unregister_kretprobes(rps, i);
1892 break;
1893 }
1894 }
1895 return ret;
1896 }
1897 EXPORT_SYMBOL_GPL(register_kretprobes);
1898
1899 void __kprobes unregister_kretprobe(struct kretprobe *rp)
1900 {
1901 unregister_kretprobes(&rp, 1);
1902 }
1903 EXPORT_SYMBOL_GPL(unregister_kretprobe);
1904
1905 void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
1906 {
1907 int i;
1908
1909 if (num <= 0)
1910 return;
1911 mutex_lock(&kprobe_mutex);
1912 for (i = 0; i < num; i++)
1913 if (__unregister_kprobe_top(&rps[i]->kp) < 0)
1914 rps[i]->kp.addr = NULL;
1915 mutex_unlock(&kprobe_mutex);
1916
1917 synchronize_sched();
1918 for (i = 0; i < num; i++) {
1919 if (rps[i]->kp.addr) {
1920 __unregister_kprobe_bottom(&rps[i]->kp);
1921 cleanup_rp_inst(rps[i]);
1922 }
1923 }
1924 }
1925 EXPORT_SYMBOL_GPL(unregister_kretprobes);
1926
1927 #else /* CONFIG_KRETPROBES */
1928 int __kprobes register_kretprobe(struct kretprobe *rp)
1929 {
1930 return -ENOSYS;
1931 }
1932 EXPORT_SYMBOL_GPL(register_kretprobe);
1933
1934 int __kprobes register_kretprobes(struct kretprobe **rps, int num)
1935 {
1936 return -ENOSYS;
1937 }
1938 EXPORT_SYMBOL_GPL(register_kretprobes);
1939
1940 void __kprobes unregister_kretprobe(struct kretprobe *rp)
1941 {
1942 }
1943 EXPORT_SYMBOL_GPL(unregister_kretprobe);
1944
1945 void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
1946 {
1947 }
1948 EXPORT_SYMBOL_GPL(unregister_kretprobes);
1949
1950 static int __kprobes pre_handler_kretprobe(struct kprobe *p,
1951 struct pt_regs *regs)
1952 {
1953 return 0;
1954 }
1955
1956 #endif /* CONFIG_KRETPROBES */
1957
1958 /* Mark the kprobe gone and remove its instruction buffer. */
1959 static void __kprobes kill_kprobe(struct kprobe *p)
1960 {
1961 struct kprobe *kp;
1962
1963 p->flags |= KPROBE_FLAG_GONE;
1964 if (kprobe_aggrprobe(p)) {
1965 /*
1966 * If this is an aggr_kprobe, we have to list all the
1967 * chained probes and mark them GONE.
1968 */
1969 list_for_each_entry_rcu(kp, &p->list, list)
1970 kp->flags |= KPROBE_FLAG_GONE;
1971 p->post_handler = NULL;
1972 p->break_handler = NULL;
1973 kill_optimized_kprobe(p);
1974 }
1975 /*
1976 * Here, we can remove insn_slot safely, because no thread calls
1977 * the original probed function (which will be freed soon) any more.
1978 */
1979 arch_remove_kprobe(p);
1980 }
1981
1982 /* Disable one kprobe */
1983 int __kprobes disable_kprobe(struct kprobe *kp)
1984 {
1985 int ret = 0;
1986
1987 mutex_lock(&kprobe_mutex);
1988
1989 /* Disable this kprobe */
1990 if (__disable_kprobe(kp) == NULL)
1991 ret = -EINVAL;
1992
1993 mutex_unlock(&kprobe_mutex);
1994 return ret;
1995 }
1996 EXPORT_SYMBOL_GPL(disable_kprobe);
1997
1998 /* Enable one kprobe */
1999 int __kprobes enable_kprobe(struct kprobe *kp)
2000 {
2001 int ret = 0;
2002 struct kprobe *p;
2003
2004 mutex_lock(&kprobe_mutex);
2005
2006 /* Check whether specified probe is valid. */
2007 p = __get_valid_kprobe(kp);
2008 if (unlikely(p == NULL)) {
2009 ret = -EINVAL;
2010 goto out;
2011 }
2012
2013 if (kprobe_gone(kp)) {
2014 /* This kprobe has gone; we can't enable it. */
2015 ret = -EINVAL;
2016 goto out;
2017 }
2018
2019 if (p != kp)
2020 kp->flags &= ~KPROBE_FLAG_DISABLED;
2021
2022 if (!kprobes_all_disarmed && kprobe_disabled(p)) {
2023 p->flags &= ~KPROBE_FLAG_DISABLED;
2024 arm_kprobe(p);
2025 }
2026 out:
2027 mutex_unlock(&kprobe_mutex);
2028 return ret;
2029 }
2030 EXPORT_SYMBOL_GPL(enable_kprobe);
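/*
 * Illustrative sketch: a registered probe can be toggled without
 * re-registering it. "my_kp" stands for a hypothetical, already
 * registered struct kprobe.
 *
 *	disable_kprobe(&my_kp);		// breakpoint removed, probe stays registered
 *	...
 *	enable_kprobe(&my_kp);		// re-arm it later
 */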
2031
2032 void __kprobes dump_kprobe(struct kprobe *kp)
2033 {
2034 printk(KERN_WARNING "Dumping kprobe:\n");
2035 printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
2036 kp->symbol_name, kp->addr, kp->offset);
2037 }
2038
2039 /* Module notifier call back, checking kprobes on the module */
2040 static int __kprobes kprobes_module_callback(struct notifier_block *nb,
2041 unsigned long val, void *data)
2042 {
2043 struct module *mod = data;
2044 struct hlist_head *head;
2045 struct kprobe *p;
2046 unsigned int i;
2047 int checkcore = (val == MODULE_STATE_GOING);
2048
2049 if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
2050 return NOTIFY_DONE;
2051
2052 /*
2053 * When MODULE_STATE_GOING is notified, both the module's .text and
2054 * .init.text sections will be freed. When MODULE_STATE_LIVE is
2055 * notified, only the .init.text section will be freed. We need to
2056 * kill the kprobes that have been inserted in those sections.
2057 */
2058 mutex_lock(&kprobe_mutex);
2059 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2060 head = &kprobe_table[i];
2061 hlist_for_each_entry_rcu(p, head, hlist)
2062 if (within_module_init((unsigned long)p->addr, mod) ||
2063 (checkcore &&
2064 within_module_core((unsigned long)p->addr, mod))) {
2065 /*
2066 * The vaddr this probe is installed at will soon
2067 * be vfreed but is not synced to disk. Hence,
2068 * disarming the breakpoint isn't needed.
2069 */
2070 kill_kprobe(p);
2071 }
2072 }
2073 mutex_unlock(&kprobe_mutex);
2074 return NOTIFY_DONE;
2075 }
2076
2077 static struct notifier_block kprobe_module_nb = {
2078 .notifier_call = kprobes_module_callback,
2079 .priority = 0
2080 };
2081
2082 static int __init init_kprobes(void)
2083 {
2084 int i, err = 0;
2085 unsigned long offset = 0, size = 0;
2086 char *modname, namebuf[128];
2087 const char *symbol_name;
2088 void *addr;
2089 struct kprobe_blackpoint *kb;
2090
2091 /* FIXME allocate the probe table, currently defined statically */
2092 /* initialize all list heads */
2093 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2094 INIT_HLIST_HEAD(&kprobe_table[i]);
2095 INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
2096 raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
2097 }
2098
2099 /*
2100 * Lookup and populate the kprobe_blacklist.
2101 *
2102 * Unlike the kretprobe blacklist, we'll need to determine
2103 * the range of addresses that belong to these functions,
2104 * since a kprobe need not necessarily be at the beginning
2105 * of a function.
2106 */
2107 for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
2108 kprobe_lookup_name(kb->name, addr);
2109 if (!addr)
2110 continue;
2111
2112 kb->start_addr = (unsigned long)addr;
2113 symbol_name = kallsyms_lookup(kb->start_addr,
2114 &size, &offset, &modname, namebuf);
2115 if (!symbol_name)
2116 kb->range = 0;
2117 else
2118 kb->range = size;
2119 }
2120
2121 if (kretprobe_blacklist_size) {
2122 /* lookup the function address from its name */
2123 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
2124 kprobe_lookup_name(kretprobe_blacklist[i].name,
2125 kretprobe_blacklist[i].addr);
2126 if (!kretprobe_blacklist[i].addr)
2127 printk("kretprobe: lookup failed: %s\n",
2128 kretprobe_blacklist[i].name);
2129 }
2130 }
2131
2132 #if defined(CONFIG_OPTPROBES)
2133 #if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
2134 /* Init kprobe_optinsn_slots */
2135 kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
2136 #endif
2137 /* By default, kprobes can be optimized */
2138 kprobes_allow_optimization = true;
2139 #endif
2140
2141 /* By default, kprobes are armed */
2142 kprobes_all_disarmed = false;
2143
2144 err = arch_init_kprobes();
2145 if (!err)
2146 err = register_die_notifier(&kprobe_exceptions_nb);
2147 if (!err)
2148 err = register_module_notifier(&kprobe_module_nb);
2149
2150 kprobes_initialized = (err == 0);
2151
2152 if (!err)
2153 init_test_probes();
2154 return err;
2155 }
2156
2157 #ifdef CONFIG_DEBUG_FS
2158 static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
2159 const char *sym, int offset, char *modname, struct kprobe *pp)
2160 {
2161 char *kprobe_type;
2162
2163 if (p->pre_handler == pre_handler_kretprobe)
2164 kprobe_type = "r";
2165 else if (p->pre_handler == setjmp_pre_handler)
2166 kprobe_type = "j";
2167 else
2168 kprobe_type = "k";
2169
2170 if (sym)
2171 seq_printf(pi, "%p %s %s+0x%x %s ",
2172 p->addr, kprobe_type, sym, offset,
2173 (modname ? modname : " "));
2174 else
2175 seq_printf(pi, "%p %s %p ",
2176 p->addr, kprobe_type, p->addr);
2177
2178 if (!pp)
2179 pp = p;
2180 seq_printf(pi, "%s%s%s%s\n",
2181 (kprobe_gone(p) ? "[GONE]" : ""),
2182 ((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
2183 (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
2184 (kprobe_ftrace(pp) ? "[FTRACE]" : ""));
2185 }
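/*
 * For reference, each line emitted above has the form
 * "address  type  symbol+offset [module]  [state flags]", where type is
 * k (kprobe), r (kretprobe) or j (jprobe). Hypothetical example lines:
 *
 *	c015d71a  k  vfs_read+0x0   [DISABLED]
 *	c011a316  r  do_fork+0x0    [OPTIMIZED]
 */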
2186
2187 static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
2188 {
2189 return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
2190 }
2191
2192 static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
2193 {
2194 (*pos)++;
2195 if (*pos >= KPROBE_TABLE_SIZE)
2196 return NULL;
2197 return pos;
2198 }
2199
2200 static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
2201 {
2202 /* Nothing to do */
2203 }
2204
2205 static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
2206 {
2207 struct hlist_head *head;
2208 struct kprobe *p, *kp;
2209 const char *sym = NULL;
2210 unsigned int i = *(loff_t *) v;
2211 unsigned long offset = 0;
2212 char *modname, namebuf[128];
2213
2214 head = &kprobe_table[i];
2215 preempt_disable();
2216 hlist_for_each_entry_rcu(p, head, hlist) {
2217 sym = kallsyms_lookup((unsigned long)p->addr, NULL,
2218 &offset, &modname, namebuf);
2219 if (kprobe_aggrprobe(p)) {
2220 list_for_each_entry_rcu(kp, &p->list, list)
2221 report_probe(pi, kp, sym, offset, modname, p);
2222 } else
2223 report_probe(pi, p, sym, offset, modname, NULL);
2224 }
2225 preempt_enable();
2226 return 0;
2227 }
2228
2229 static const struct seq_operations kprobes_seq_ops = {
2230 .start = kprobe_seq_start,
2231 .next = kprobe_seq_next,
2232 .stop = kprobe_seq_stop,
2233 .show = show_kprobe_addr
2234 };
2235
2236 static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
2237 {
2238 return seq_open(filp, &kprobes_seq_ops);
2239 }
2240
2241 static const struct file_operations debugfs_kprobes_operations = {
2242 .open = kprobes_open,
2243 .read = seq_read,
2244 .llseek = seq_lseek,
2245 .release = seq_release,
2246 };
2247
2248 static void __kprobes arm_all_kprobes(void)
2249 {
2250 struct hlist_head *head;
2251 struct kprobe *p;
2252 unsigned int i;
2253
2254 mutex_lock(&kprobe_mutex);
2255
2256 /* If kprobes are armed, just return */
2257 if (!kprobes_all_disarmed)
2258 goto already_enabled;
2259
2260 /* Arming kprobes doesn't optimize the kprobe itself */
2261 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2262 head = &kprobe_table[i];
2263 hlist_for_each_entry_rcu(p, head, hlist)
2264 if (!kprobe_disabled(p))
2265 arm_kprobe(p);
2266 }
2267
2268 kprobes_all_disarmed = false;
2269 printk(KERN_INFO "Kprobes globally enabled\n");
2270
2271 already_enabled:
2272 mutex_unlock(&kprobe_mutex);
2273 return;
2274 }
2275
2276 static void __kprobes disarm_all_kprobes(void)
2277 {
2278 struct hlist_head *head;
2279 struct kprobe *p;
2280 unsigned int i;
2281
2282 mutex_lock(&kprobe_mutex);
2283
2284 /* If kprobes are already disarmed, just return */
2285 if (kprobes_all_disarmed) {
2286 mutex_unlock(&kprobe_mutex);
2287 return;
2288 }
2289
2290 kprobes_all_disarmed = true;
2291 printk(KERN_INFO "Kprobes globally disabled\n");
2292
2293 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2294 head = &kprobe_table[i];
2295 hlist_for_each_entry_rcu(p, head, hlist) {
2296 if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
2297 disarm_kprobe(p, false);
2298 }
2299 }
2300 mutex_unlock(&kprobe_mutex);
2301
2302 /* Wait for the optimizer to finish disarming all kprobes */
2303 wait_for_kprobe_optimizer();
2304 }
2305
2306 /*
2307 * XXX: The debugfs bool file interface doesn't allow for callbacks
2308 * when the bool state is switched. We can reuse that facility once it
2309 * becomes available.
2310 */
2311 static ssize_t read_enabled_file_bool(struct file *file,
2312 char __user *user_buf, size_t count, loff_t *ppos)
2313 {
2314 char buf[3];
2315
2316 if (!kprobes_all_disarmed)
2317 buf[0] = '1';
2318 else
2319 buf[0] = '0';
2320 buf[1] = '\n';
2321 buf[2] = 0x00;
2322 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
2323 }
2324
2325 static ssize_t write_enabled_file_bool(struct file *file,
2326 const char __user *user_buf, size_t count, loff_t *ppos)
2327 {
2328 char buf[32];
2329 size_t buf_size;
2330
2331 buf_size = min(count, (sizeof(buf)-1));
2332 if (copy_from_user(buf, user_buf, buf_size))
2333 return -EFAULT;
2334
2335 switch (buf[0]) {
2336 case 'y':
2337 case 'Y':
2338 case '1':
2339 arm_all_kprobes();
2340 break;
2341 case 'n':
2342 case 'N':
2343 case '0':
2344 disarm_all_kprobes();
2345 break;
2346 }
2347
2348 return count;
2349 }
2350
2351 static const struct file_operations fops_kp = {
2352 .read = read_enabled_file_bool,
2353 .write = write_enabled_file_bool,
2354 .llseek = default_llseek,
2355 };
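/*
 * These handlers back the <debugfs>/kprobes/enabled file created below;
 * for example, with debugfs mounted at /sys/kernel/debug,
 * "echo 0 > /sys/kernel/debug/kprobes/enabled" disarms every probe and
 * writing 1 (or 'y') re-arms them.
 */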
2356
2357 static int __kprobes debugfs_kprobe_init(void)
2358 {
2359 struct dentry *dir, *file;
2360 unsigned int value = 1;
2361
2362 dir = debugfs_create_dir("kprobes", NULL);
2363 if (!dir)
2364 return -ENOMEM;
2365
2366 file = debugfs_create_file("list", 0444, dir, NULL,
2367 &debugfs_kprobes_operations);
2368 if (!file) {
2369 debugfs_remove(dir);
2370 return -ENOMEM;
2371 }
2372
2373 file = debugfs_create_file("enabled", 0600, dir,
2374 &value, &fops_kp);
2375 if (!file) {
2376 debugfs_remove(dir);
2377 return -ENOMEM;
2378 }
2379
2380 return 0;
2381 }
2382
2383 late_initcall(debugfs_kprobe_init);
2384 #endif /* CONFIG_DEBUG_FS */
2385
2386 module_init(init_kprobes);
2387
2388 /* defined in arch/.../kernel/kprobes.c */
2389 EXPORT_SYMBOL_GPL(jprobe_return);
2390