1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Kernel Probes (KProbes)
4  *  kernel/kprobes.c
5  *
6  * Copyright (C) IBM Corporation, 2002, 2004
7  *
8  * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
9  *		Probes initial implementation (includes suggestions from
10  *		Rusty Russell).
11  * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
12  *		hlists and exceptions notifier as suggested by Andi Kleen.
13  * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
14  *		interface to access function arguments.
15  * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
16  *		exceptions notifier to be first on the priority list.
17  * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
18  *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
19  *		<prasanna@in.ibm.com> added function-return probes.
20  */
21 #include <linux/kprobes.h>
22 #include <linux/hash.h>
23 #include <linux/init.h>
24 #include <linux/slab.h>
25 #include <linux/stddef.h>
26 #include <linux/export.h>
27 #include <linux/moduleloader.h>
28 #include <linux/kallsyms.h>
29 #include <linux/freezer.h>
30 #include <linux/seq_file.h>
31 #include <linux/debugfs.h>
32 #include <linux/sysctl.h>
33 #include <linux/kdebug.h>
34 #include <linux/memory.h>
35 #include <linux/ftrace.h>
36 #include <linux/cpu.h>
37 #include <linux/jump_label.h>
38 #include <linux/perf_event.h>
39 #include <linux/static_call.h>
40 
41 #include <asm/sections.h>
42 #include <asm/cacheflush.h>
43 #include <asm/errno.h>
44 #include <linux/uaccess.h>
45 
46 #define KPROBE_HASH_BITS 6
47 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
48 
49 
50 static int kprobes_initialized;
51 /* kprobe_table can be accessed by
52  * - Normal hlist traversal and RCU add/del while kprobe_mutex is held,
53  * or
54  * - RCU hlist traversal with preemption disabled (breakpoint handlers).
55  */
56 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
57 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
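/*
 * A minimal sketch of how these tables are used elsewhere in this file
 * (see get_kprobe() below): a probe address is hashed down to
 * KPROBE_HASH_BITS bits and the matching bucket is walked under RCU:
 *
 *	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
 *	hlist_for_each_entry_rcu(p, head, hlist, lockdep_is_held(&kprobe_mutex))
 *		if (p->addr == addr)
 *			return p;
 */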
58 
59 /* NOTE: change this value only with kprobe_mutex held */
60 static bool kprobes_all_disarmed;
61 
62 /* This protects kprobe_table and optimizing_list */
63 static DEFINE_MUTEX(kprobe_mutex);
64 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
65 static struct {
66 	raw_spinlock_t lock ____cacheline_aligned_in_smp;
67 } kretprobe_table_locks[KPROBE_TABLE_SIZE];
68 
69 kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
70 					unsigned int __unused)
71 {
72 	return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
73 }
74 
75 static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
76 {
77 	return &(kretprobe_table_locks[hash].lock);
78 }
79 
80 /* Blacklist -- list of struct kprobe_blacklist_entry */
81 static LIST_HEAD(kprobe_blacklist);
82 
83 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
84 /*
85  * kprobe->ainsn.insn points to the copy of the instruction to be
86  * single-stepped. x86_64, POWER4 and above have no-exec support and
87  * stepping on the instruction on a vmalloced/kmalloced/data page
88  * is a recipe for disaster
89  */
90 struct kprobe_insn_page {
91 	struct list_head list;
92 	kprobe_opcode_t *insns;		/* Page of instruction slots */
93 	struct kprobe_insn_cache *cache;
94 	int nused;
95 	int ngarbage;
96 	char slot_used[];
97 };
98 
99 #define KPROBE_INSN_PAGE_SIZE(slots)			\
100 	(offsetof(struct kprobe_insn_page, slot_used) +	\
101 	 (sizeof(char) * (slots)))
102 
103 static int slots_per_page(struct kprobe_insn_cache *c)
104 {
105 	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
106 }
107 
108 enum kprobe_slot_state {
109 	SLOT_CLEAN = 0,
110 	SLOT_DIRTY = 1,
111 	SLOT_USED = 2,
112 };
113 
114 void __weak *alloc_insn_page(void)
115 {
116 	return module_alloc(PAGE_SIZE);
117 }
118 
119 void __weak free_insn_page(void *page)
120 {
121 	module_memfree(page);
122 }
123 
124 struct kprobe_insn_cache kprobe_insn_slots = {
125 	.mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
126 	.alloc = alloc_insn_page,
127 	.free = free_insn_page,
128 	.sym = KPROBE_INSN_PAGE_SYM,
129 	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
130 	.insn_size = MAX_INSN_SIZE,
131 	.nr_garbage = 0,
132 };
133 static int collect_garbage_slots(struct kprobe_insn_cache *c);
134 
135 /**
136  * __get_insn_slot() - Find a slot on an executable page for an instruction.
137  * We allocate an executable page if there's no room on existing ones.
138  */
139 kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
140 {
141 	struct kprobe_insn_page *kip;
142 	kprobe_opcode_t *slot = NULL;
143 
144 	/* Since the slot array is not protected by rcu, we need a mutex */
145 	mutex_lock(&c->mutex);
146  retry:
147 	rcu_read_lock();
148 	list_for_each_entry_rcu(kip, &c->pages, list) {
149 		if (kip->nused < slots_per_page(c)) {
150 			int i;
151 			for (i = 0; i < slots_per_page(c); i++) {
152 				if (kip->slot_used[i] == SLOT_CLEAN) {
153 					kip->slot_used[i] = SLOT_USED;
154 					kip->nused++;
155 					slot = kip->insns + (i * c->insn_size);
156 					rcu_read_unlock();
157 					goto out;
158 				}
159 			}
160 			/* kip->nused is broken. Fix it. */
161 			kip->nused = slots_per_page(c);
162 			WARN_ON(1);
163 		}
164 	}
165 	rcu_read_unlock();
166 
167 	/* If there are any garbage slots, collect them and try again. */
168 	if (c->nr_garbage && collect_garbage_slots(c) == 0)
169 		goto retry;
170 
171 	/* All out of space.  Need to allocate a new page. */
172 	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
173 	if (!kip)
174 		goto out;
175 
176 	/*
177 	 * Use module_alloc so this page is within +/- 2GB of where the
178 	 * kernel image and loaded module images reside. This is required
179 	 * so x86_64 can correctly handle the %rip-relative fixups.
180 	 */
181 	kip->insns = c->alloc();
182 	if (!kip->insns) {
183 		kfree(kip);
184 		goto out;
185 	}
186 	INIT_LIST_HEAD(&kip->list);
187 	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
188 	kip->slot_used[0] = SLOT_USED;
189 	kip->nused = 1;
190 	kip->ngarbage = 0;
191 	kip->cache = c;
192 	list_add_rcu(&kip->list, &c->pages);
193 	slot = kip->insns;
194 
195 	/* Record the perf ksymbol register event after adding the page */
196 	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, (unsigned long)kip->insns,
197 			   PAGE_SIZE, false, c->sym);
198 out:
199 	mutex_unlock(&c->mutex);
200 	return slot;
201 }
202 
203 /* Return 1 if all garbage slots are collected, otherwise 0. */
204 static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
205 {
206 	kip->slot_used[idx] = SLOT_CLEAN;
207 	kip->nused--;
208 	if (kip->nused == 0) {
209 		/*
210 		 * Page is no longer in use.  Free it unless
211 		 * it's the last one.  We keep the last one
212 		 * so as not to have to set it up again the
213 		 * next time somebody inserts a probe.
214 		 */
215 		if (!list_is_singular(&kip->list)) {
216 			/*
217 			 * Record perf ksymbol unregister event before removing
218 			 * the page.
219 			 */
220 			perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
221 					   (unsigned long)kip->insns, PAGE_SIZE, true,
222 					   kip->cache->sym);
223 			list_del_rcu(&kip->list);
224 			synchronize_rcu();
225 			kip->cache->free(kip->insns);
226 			kfree(kip);
227 		}
228 		return 1;
229 	}
230 	return 0;
231 }
232 
233 static int collect_garbage_slots(struct kprobe_insn_cache *c)
234 {
235 	struct kprobe_insn_page *kip, *next;
236 
237 	/* Ensure no one is still running on the garbage slots */
238 	synchronize_rcu();
239 
240 	list_for_each_entry_safe(kip, next, &c->pages, list) {
241 		int i;
242 		if (kip->ngarbage == 0)
243 			continue;
244 		kip->ngarbage = 0;	/* we will collect all garbage slots */
245 		for (i = 0; i < slots_per_page(c); i++) {
246 			if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
247 				break;
248 		}
249 	}
250 	c->nr_garbage = 0;
251 	return 0;
252 }
253 
254 void __free_insn_slot(struct kprobe_insn_cache *c,
255 		      kprobe_opcode_t *slot, int dirty)
256 {
257 	struct kprobe_insn_page *kip;
258 	long idx;
259 
260 	mutex_lock(&c->mutex);
261 	rcu_read_lock();
262 	list_for_each_entry_rcu(kip, &c->pages, list) {
263 		idx = ((long)slot - (long)kip->insns) /
264 			(c->insn_size * sizeof(kprobe_opcode_t));
265 		if (idx >= 0 && idx < slots_per_page(c))
266 			goto out;
267 	}
268 	/* Could not find this slot. */
269 	WARN_ON(1);
270 	kip = NULL;
271 out:
272 	rcu_read_unlock();
273 	/* Mark and sweep: this may sleep */
274 	if (kip) {
275 		/* Check double free */
276 		WARN_ON(kip->slot_used[idx] != SLOT_USED);
277 		if (dirty) {
278 			kip->slot_used[idx] = SLOT_DIRTY;
279 			kip->ngarbage++;
280 			if (++c->nr_garbage > slots_per_page(c))
281 				collect_garbage_slots(c);
282 		} else {
283 			collect_one_slot(kip, idx);
284 		}
285 	}
286 	mutex_unlock(&c->mutex);
287 }
288 
289 /*
290  * Check whether the given address is on a page of kprobe instruction slots.
291  * This is used for checking whether an address found on a stack
292  * is in a text area or not.
293  */
294 bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
295 {
296 	struct kprobe_insn_page *kip;
297 	bool ret = false;
298 
299 	rcu_read_lock();
300 	list_for_each_entry_rcu(kip, &c->pages, list) {
301 		if (addr >= (unsigned long)kip->insns &&
302 		    addr < (unsigned long)kip->insns + PAGE_SIZE) {
303 			ret = true;
304 			break;
305 		}
306 	}
307 	rcu_read_unlock();
308 
309 	return ret;
310 }
311 
312 int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum,
313 			     unsigned long *value, char *type, char *sym)
314 {
315 	struct kprobe_insn_page *kip;
316 	int ret = -ERANGE;
317 
318 	rcu_read_lock();
319 	list_for_each_entry_rcu(kip, &c->pages, list) {
320 		if ((*symnum)--)
321 			continue;
322 		strlcpy(sym, c->sym, KSYM_NAME_LEN);
323 		*type = 't';
324 		*value = (unsigned long)kip->insns;
325 		ret = 0;
326 		break;
327 	}
328 	rcu_read_unlock();
329 
330 	return ret;
331 }
332 
333 #ifdef CONFIG_OPTPROBES
334 /* For optimized_kprobe buffer */
335 struct kprobe_insn_cache kprobe_optinsn_slots = {
336 	.mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
337 	.alloc = alloc_insn_page,
338 	.free = free_insn_page,
339 	.sym = KPROBE_OPTINSN_PAGE_SYM,
340 	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
341 	/* .insn_size is initialized later */
342 	.nr_garbage = 0,
343 };
344 #endif
345 #endif
346 
347 /* We have preemption disabled, so it is safe to use the __ versions */
348 static inline void set_kprobe_instance(struct kprobe *kp)
349 {
350 	__this_cpu_write(kprobe_instance, kp);
351 }
352 
353 static inline void reset_kprobe_instance(void)
354 {
355 	__this_cpu_write(kprobe_instance, NULL);
356 }
357 
358 /*
359  * This routine is called either:
360  * 	- under the kprobe_mutex - during kprobe_[un]register()
361  * 				OR
362  * 	- with preemption disabled - from arch/xxx/kernel/kprobes.c
363  */
364 struct kprobe *get_kprobe(void *addr)
365 {
366 	struct hlist_head *head;
367 	struct kprobe *p;
368 
369 	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
370 	hlist_for_each_entry_rcu(p, head, hlist,
371 				 lockdep_is_held(&kprobe_mutex)) {
372 		if (p->addr == addr)
373 			return p;
374 	}
375 
376 	return NULL;
377 }
378 NOKPROBE_SYMBOL(get_kprobe);
379 
380 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
381 
382 /* Return true if the kprobe is an aggregator */
383 static inline int kprobe_aggrprobe(struct kprobe *p)
384 {
385 	return p->pre_handler == aggr_pre_handler;
386 }
387 
388 /* Return true(!0) if the kprobe is unused */
389 static inline int kprobe_unused(struct kprobe *p)
390 {
391 	return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
392 	       list_empty(&p->list);
393 }
394 
395 /*
396  * Keep all fields in the kprobe consistent
397  */
398 static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
399 {
400 	memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
401 	memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
402 }
403 
404 #ifdef CONFIG_OPTPROBES
405 /* NOTE: change this value only with kprobe_mutex held */
406 static bool kprobes_allow_optimization;
407 
408 /*
409  * Call all pre_handlers on the list, but ignore their return values.
410  * This must be called from the arch-dependent optimized caller.
411  */
412 void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
413 {
414 	struct kprobe *kp;
415 
416 	list_for_each_entry_rcu(kp, &p->list, list) {
417 		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
418 			set_kprobe_instance(kp);
419 			kp->pre_handler(kp, regs);
420 		}
421 		reset_kprobe_instance();
422 	}
423 }
424 NOKPROBE_SYMBOL(opt_pre_handler);
425 
426 /* Free optimized instructions and optimized_kprobe */
427 static void free_aggr_kprobe(struct kprobe *p)
428 {
429 	struct optimized_kprobe *op;
430 
431 	op = container_of(p, struct optimized_kprobe, kp);
432 	arch_remove_optimized_kprobe(op);
433 	arch_remove_kprobe(p);
434 	kfree(op);
435 }
436 
437 /* Return true(!0) if the kprobe is ready for optimization. */
438 static inline int kprobe_optready(struct kprobe *p)
439 {
440 	struct optimized_kprobe *op;
441 
442 	if (kprobe_aggrprobe(p)) {
443 		op = container_of(p, struct optimized_kprobe, kp);
444 		return arch_prepared_optinsn(&op->optinsn);
445 	}
446 
447 	return 0;
448 }
449 
450 /* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
451 static inline int kprobe_disarmed(struct kprobe *p)
452 {
453 	struct optimized_kprobe *op;
454 
455 	/* If the kprobe is not an aggr/opt probe, just return whether it is disabled */
456 	if (!kprobe_aggrprobe(p))
457 		return kprobe_disabled(p);
458 
459 	op = container_of(p, struct optimized_kprobe, kp);
460 
461 	return kprobe_disabled(p) && list_empty(&op->list);
462 }
463 
464 /* Return true(!0) if the probe is queued on (un)optimizing lists */
465 static int kprobe_queued(struct kprobe *p)
466 {
467 	struct optimized_kprobe *op;
468 
469 	if (kprobe_aggrprobe(p)) {
470 		op = container_of(p, struct optimized_kprobe, kp);
471 		if (!list_empty(&op->list))
472 			return 1;
473 	}
474 	return 0;
475 }
476 
477 /*
478  * Return an optimized kprobe whose optimizing code replaces
479  * instructions including 'addr' (excluding the breakpoint).
480  */
481 static struct kprobe *get_optimized_kprobe(unsigned long addr)
482 {
483 	int i;
484 	struct kprobe *p = NULL;
485 	struct optimized_kprobe *op;
486 
487 	/* Don't check i == 0, since that is a breakpoint case. */
488 	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
489 		p = get_kprobe((void *)(addr - i));
490 
491 	if (p && kprobe_optready(p)) {
492 		op = container_of(p, struct optimized_kprobe, kp);
493 		if (arch_within_optimized_kprobe(op, addr))
494 			return p;
495 	}
496 
497 	return NULL;
498 }
499 
500 /* Optimization staging list, protected by kprobe_mutex */
501 static LIST_HEAD(optimizing_list);
502 static LIST_HEAD(unoptimizing_list);
503 static LIST_HEAD(freeing_list);
504 
505 static void kprobe_optimizer(struct work_struct *work);
506 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
507 #define OPTIMIZE_DELAY 5
508 
509 /*
510  * Optimize (replace a breakpoint with a jump) kprobes listed on
511  * optimizing_list.
512  */
513 static void do_optimize_kprobes(void)
514 {
515 	lockdep_assert_held(&text_mutex);
516 	/*
517 	 * The optimization/unoptimization refers to online_cpus via
518 	 * stop_machine(), and cpu-hotplug modifies online_cpus. At the
519 	 * same time, text_mutex will be held in cpu-hotplug and here.
520 	 * This combination can cause a deadlock (cpu-hotplug tries to lock
521 	 * text_mutex but stop_machine() can not be done because online_cpus
522 	 * has been changed).
523 	 * To avoid this deadlock, the caller must have locked cpu-hotplug,
524 	 * preventing cpu-hotplug from running outside of text_mutex locking.
525 	 */
526 	lockdep_assert_cpus_held();
527 
528 	/* Optimization is never done while kprobes are disarmed */
529 	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
530 	    list_empty(&optimizing_list))
531 		return;
532 
533 	arch_optimize_kprobes(&optimizing_list);
534 }
535 
536 /*
537  * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
538  * if needed) kprobes listed on unoptimizing_list.
539  */
540 static void do_unoptimize_kprobes(void)
541 {
542 	struct optimized_kprobe *op, *tmp;
543 
544 	lockdep_assert_held(&text_mutex);
545 	/* See comment in do_optimize_kprobes() */
546 	lockdep_assert_cpus_held();
547 
548 	/* Unoptimization must be done anytime */
549 	if (list_empty(&unoptimizing_list))
550 		return;
551 
552 	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
553 	/* Loop on the freeing_list for disarming */
554 	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
555 		/* Switching from detour code to origin */
556 		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
557 		/* Disarm probes if marked disabled */
558 		if (kprobe_disabled(&op->kp))
559 			arch_disarm_kprobe(&op->kp);
560 		if (kprobe_unused(&op->kp)) {
561 			/*
562 			 * Remove unused probes from hash list. After waiting
563 			 * for synchronization, these probes are reclaimed.
564 			 * (reclaiming is done by do_free_cleaned_kprobes.)
565 			 */
566 			hlist_del_rcu(&op->kp.hlist);
567 		} else
568 			list_del_init(&op->list);
569 	}
570 }
571 
572 /* Reclaim all kprobes on the freeing_list */
573 static void do_free_cleaned_kprobes(void)
574 {
575 	struct optimized_kprobe *op, *tmp;
576 
577 	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
578 		list_del_init(&op->list);
579 		if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) {
580 			/*
581 			 * This must not happen, but if there is a kprobe
582 			 * still in use, keep it on kprobes hash list.
583 			 */
584 			continue;
585 		}
586 		free_aggr_kprobe(&op->kp);
587 	}
588 }
589 
590 /* Start optimizer after OPTIMIZE_DELAY passed */
591 static void kick_kprobe_optimizer(void)
592 {
593 	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
594 }
595 
596 /* Kprobe jump optimizer */
597 static void kprobe_optimizer(struct work_struct *work)
598 {
599 	mutex_lock(&kprobe_mutex);
600 	cpus_read_lock();
601 	mutex_lock(&text_mutex);
602 
603 	/*
604 	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
605 	 * kprobes before waiting for the quiescence period.
606 	 */
607 	do_unoptimize_kprobes();
608 
609 	/*
610 	 * Step 2: Wait for a quiescence period to ensure that all potentially
611 	 * preempted tasks have been scheduled normally. Because an optprobe
612 	 * may modify multiple instructions, there is a chance that the Nth
613 	 * instruction is preempted. In that case, such tasks can return
614 	 * to the 2nd-Nth byte of the jump instruction. This wait avoids that.
615 	 * Note that on a non-preemptive kernel, this is transparently converted
616 	 * to synchronize_sched() to wait for all interrupts to have completed.
617 	 */
618 	synchronize_rcu_tasks();
619 
620 	/* Step 3: Optimize kprobes after the quiescence period */
621 	do_optimize_kprobes();
622 
623 	/* Step 4: Free cleaned kprobes after quiesence period */
624 	do_free_cleaned_kprobes();
625 
626 	mutex_unlock(&text_mutex);
627 	cpus_read_unlock();
628 
629 	/* Step 5: Kick optimizer again if needed */
630 	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
631 		kick_kprobe_optimizer();
632 
633 	mutex_unlock(&kprobe_mutex);
634 }
635 
636 /* Wait for completing optimization and unoptimization */
637 void wait_for_kprobe_optimizer(void)
638 {
639 	mutex_lock(&kprobe_mutex);
640 
641 	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
642 		mutex_unlock(&kprobe_mutex);
643 
644 		/* this will also make optimizing_work execute immediately */
645 		flush_delayed_work(&optimizing_work);
646 		/* @optimizing_work might not have been queued yet, relax */
647 		cpu_relax();
648 
649 		mutex_lock(&kprobe_mutex);
650 	}
651 
652 	mutex_unlock(&kprobe_mutex);
653 }
654 
655 static bool optprobe_queued_unopt(struct optimized_kprobe *op)
656 {
657 	struct optimized_kprobe *_op;
658 
659 	list_for_each_entry(_op, &unoptimizing_list, list) {
660 		if (op == _op)
661 			return true;
662 	}
663 
664 	return false;
665 }
666 
667 /* Optimize kprobe if p is ready to be optimized */
668 static void optimize_kprobe(struct kprobe *p)
669 {
670 	struct optimized_kprobe *op;
671 
672 	/* Check if the kprobe is disabled or not ready for optimization. */
673 	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
674 	    (kprobe_disabled(p) || kprobes_all_disarmed))
675 		return;
676 
677 	/* kprobes with post_handler can not be optimized */
678 	if (p->post_handler)
679 		return;
680 
681 	op = container_of(p, struct optimized_kprobe, kp);
682 
683 	/* Check that there are no other kprobes at the optimized instructions */
684 	if (arch_check_optimized_kprobe(op) < 0)
685 		return;
686 
687 	/* Check if it is already optimized. */
688 	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
689 		if (optprobe_queued_unopt(op)) {
690 			/* This is under unoptimizing. Just dequeue the probe */
691 			list_del_init(&op->list);
692 		}
693 		return;
694 	}
695 	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
696 
697 	/* On unoptimizing/optimizing_list, op must have OPTIMIZED flag */
698 	if (WARN_ON_ONCE(!list_empty(&op->list)))
699 		return;
700 
701 	list_add(&op->list, &optimizing_list);
702 	kick_kprobe_optimizer();
703 }
704 
705 /* Short cut to direct unoptimizing */
706 static void force_unoptimize_kprobe(struct optimized_kprobe *op)
707 {
708 	lockdep_assert_cpus_held();
709 	arch_unoptimize_kprobe(op);
710 	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
711 }
712 
713 /* Unoptimize a kprobe if p is optimized */
714 static void unoptimize_kprobe(struct kprobe *p, bool force)
715 {
716 	struct optimized_kprobe *op;
717 
718 	if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
719 		return; /* This is not an optprobe nor optimized */
720 
721 	op = container_of(p, struct optimized_kprobe, kp);
722 	if (!kprobe_optimized(p))
723 		return;
724 
725 	if (!list_empty(&op->list)) {
726 		if (optprobe_queued_unopt(op)) {
727 			/* Queued in unoptimizing queue */
728 			if (force) {
729 				/*
730 				 * Forcibly unoptimize the kprobe here, and queue it
731 				 * in the freeing list for release afterwards.
732 				 */
733 				force_unoptimize_kprobe(op);
734 				list_move(&op->list, &freeing_list);
735 			}
736 		} else {
737 			/* Dequeue from the optimizing queue */
738 			list_del_init(&op->list);
739 			op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
740 		}
741 		return;
742 	}
743 
744 	/* Optimized kprobe case */
745 	if (force) {
746 		/* Forcibly update the code: this is a special case */
747 		force_unoptimize_kprobe(op);
748 	} else {
749 		list_add(&op->list, &unoptimizing_list);
750 		kick_kprobe_optimizer();
751 	}
752 }
753 
754 /* Cancel unoptimizing for reusing */
755 static int reuse_unused_kprobe(struct kprobe *ap)
756 {
757 	struct optimized_kprobe *op;
758 
759 	/*
760 	 * An unused kprobe MUST be in the middle of delayed unoptimizing (which
761 	 * means there is still a relative jump in place) and disabled.
762 	 */
763 	op = container_of(ap, struct optimized_kprobe, kp);
764 	WARN_ON_ONCE(list_empty(&op->list));
765 	/* Enable the probe again */
766 	ap->flags &= ~KPROBE_FLAG_DISABLED;
767 	/* Optimize it again (remove from op->list) */
768 	if (!kprobe_optready(ap))
769 		return -EINVAL;
770 
771 	optimize_kprobe(ap);
772 	return 0;
773 }
774 
775 /* Remove optimized instructions */
776 static void kill_optimized_kprobe(struct kprobe *p)
777 {
778 	struct optimized_kprobe *op;
779 
780 	op = container_of(p, struct optimized_kprobe, kp);
781 	if (!list_empty(&op->list))
782 		/* Dequeue from the (un)optimization queue */
783 		list_del_init(&op->list);
784 	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
785 
786 	if (kprobe_unused(p)) {
787 		/* Enqueue if it is unused */
788 		list_add(&op->list, &freeing_list);
789 		/*
790 		 * Remove unused probes from the hash list. After waiting
791 		 * for synchronization, this probe is reclaimed.
792 		 * (reclaiming is done by do_free_cleaned_kprobes().)
793 		 */
794 		hlist_del_rcu(&op->kp.hlist);
795 	}
796 
797 	/* Don't touch the code, because it is already freed. */
798 	arch_remove_optimized_kprobe(op);
799 }
800 
801 static inline
802 void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
803 {
804 	if (!kprobe_ftrace(p))
805 		arch_prepare_optimized_kprobe(op, p);
806 }
807 
808 /* Try to prepare optimized instructions */
809 static void prepare_optimized_kprobe(struct kprobe *p)
810 {
811 	struct optimized_kprobe *op;
812 
813 	op = container_of(p, struct optimized_kprobe, kp);
814 	__prepare_optimized_kprobe(op, p);
815 }
816 
817 /* Allocate new optimized_kprobe and try to prepare optimized instructions */
818 static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
819 {
820 	struct optimized_kprobe *op;
821 
822 	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
823 	if (!op)
824 		return NULL;
825 
826 	INIT_LIST_HEAD(&op->list);
827 	op->kp.addr = p->addr;
828 	__prepare_optimized_kprobe(op, p);
829 
830 	return &op->kp;
831 }
832 
833 static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
834 
835 /*
836  * Prepare an optimized_kprobe and optimize it
837  * NOTE: p must be a normal registered kprobe
838  */
839 static void try_to_optimize_kprobe(struct kprobe *p)
840 {
841 	struct kprobe *ap;
842 	struct optimized_kprobe *op;
843 
844 	/* Impossible to optimize ftrace-based kprobe */
845 	if (kprobe_ftrace(p))
846 		return;
847 
848 	/* For preparing optimization, jump_label_text_reserved() is called */
849 	cpus_read_lock();
850 	jump_label_lock();
851 	mutex_lock(&text_mutex);
852 
853 	ap = alloc_aggr_kprobe(p);
854 	if (!ap)
855 		goto out;
856 
857 	op = container_of(ap, struct optimized_kprobe, kp);
858 	if (!arch_prepared_optinsn(&op->optinsn)) {
859 		/* If setting up the optimization failed, fall back to a regular kprobe */
860 		arch_remove_optimized_kprobe(op);
861 		kfree(op);
862 		goto out;
863 	}
864 
865 	init_aggr_kprobe(ap, p);
866 	optimize_kprobe(ap);	/* This just kicks optimizer thread */
867 
868 out:
869 	mutex_unlock(&text_mutex);
870 	jump_label_unlock();
871 	cpus_read_unlock();
872 }
873 
874 static void optimize_all_kprobes(void)
875 {
876 	struct hlist_head *head;
877 	struct kprobe *p;
878 	unsigned int i;
879 
880 	mutex_lock(&kprobe_mutex);
881 	/* If optimization is already allowed, just return */
882 	if (kprobes_allow_optimization)
883 		goto out;
884 
885 	cpus_read_lock();
886 	kprobes_allow_optimization = true;
887 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
888 		head = &kprobe_table[i];
889 		hlist_for_each_entry(p, head, hlist)
890 			if (!kprobe_disabled(p))
891 				optimize_kprobe(p);
892 	}
893 	cpus_read_unlock();
894 	printk(KERN_INFO "Kprobes globally optimized\n");
895 out:
896 	mutex_unlock(&kprobe_mutex);
897 }
898 
899 #ifdef CONFIG_SYSCTL
900 static void unoptimize_all_kprobes(void)
901 {
902 	struct hlist_head *head;
903 	struct kprobe *p;
904 	unsigned int i;
905 
906 	mutex_lock(&kprobe_mutex);
907 	/* If optimization is already prohibited, just return */
908 	if (!kprobes_allow_optimization) {
909 		mutex_unlock(&kprobe_mutex);
910 		return;
911 	}
912 
913 	cpus_read_lock();
914 	kprobes_allow_optimization = false;
915 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
916 		head = &kprobe_table[i];
917 		hlist_for_each_entry(p, head, hlist) {
918 			if (!kprobe_disabled(p))
919 				unoptimize_kprobe(p, false);
920 		}
921 	}
922 	cpus_read_unlock();
923 	mutex_unlock(&kprobe_mutex);
924 
925 	/* Wait for unoptimizing completion */
926 	wait_for_kprobe_optimizer();
927 	printk(KERN_INFO "Kprobes globally unoptimized\n");
928 }
929 
930 static DEFINE_MUTEX(kprobe_sysctl_mutex);
931 int sysctl_kprobes_optimization;
932 int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
933 				      void *buffer, size_t *length,
934 				      loff_t *ppos)
935 {
936 	int ret;
937 
938 	mutex_lock(&kprobe_sysctl_mutex);
939 	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
940 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
941 
942 	if (sysctl_kprobes_optimization)
943 		optimize_all_kprobes();
944 	else
945 		unoptimize_all_kprobes();
946 	mutex_unlock(&kprobe_sysctl_mutex);
947 
948 	return ret;
949 }
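/*
 * A usage sketch, assuming this handler is wired up (outside this file) as
 * the 'debug.kprobes-optimization' sysctl: writing 0 or 1 toggles
 * unoptimize_all_kprobes()/optimize_all_kprobes() respectively, e.g.
 *
 *	# echo 0 > /proc/sys/debug/kprobes-optimization
 *	# echo 1 > /proc/sys/debug/kprobes-optimization
 */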
950 #endif /* CONFIG_SYSCTL */
951 
952 /* Put a breakpoint for a probe. Must be called with text_mutex locked */
953 static void __arm_kprobe(struct kprobe *p)
954 {
955 	struct kprobe *_p;
956 
957 	/* Check collision with other optimized kprobes */
958 	_p = get_optimized_kprobe((unsigned long)p->addr);
959 	if (unlikely(_p))
960 		/* Fallback to unoptimized kprobe */
961 		unoptimize_kprobe(_p, true);
962 
963 	arch_arm_kprobe(p);
964 	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
965 }
966 
967 /* Remove the breakpoint of a probe. Must be called with text_mutex locked */
968 static void __disarm_kprobe(struct kprobe *p, bool reopt)
969 {
970 	struct kprobe *_p;
971 
972 	/* Try to unoptimize */
973 	unoptimize_kprobe(p, kprobes_all_disarmed);
974 
975 	if (!kprobe_queued(p)) {
976 		arch_disarm_kprobe(p);
977 		/* If another kprobe was blocked, optimize it. */
978 		_p = get_optimized_kprobe((unsigned long)p->addr);
979 		if (unlikely(_p) && reopt)
980 			optimize_kprobe(_p);
981 	}
982 	/* TODO: reoptimize others after unoptimizing this probe */
983 }
984 
985 #else /* !CONFIG_OPTPROBES */
986 
987 #define optimize_kprobe(p)			do {} while (0)
988 #define unoptimize_kprobe(p, f)			do {} while (0)
989 #define kill_optimized_kprobe(p)		do {} while (0)
990 #define prepare_optimized_kprobe(p)		do {} while (0)
991 #define try_to_optimize_kprobe(p)		do {} while (0)
992 #define __arm_kprobe(p)				arch_arm_kprobe(p)
993 #define __disarm_kprobe(p, o)			arch_disarm_kprobe(p)
994 #define kprobe_disarmed(p)			kprobe_disabled(p)
995 #define wait_for_kprobe_optimizer()		do {} while (0)
996 
997 static int reuse_unused_kprobe(struct kprobe *ap)
998 {
999 	/*
1000 	 * If the optimized kprobe is NOT supported, the aggr kprobe is
1001 	 * released at the same time that the last aggregated kprobe is
1002 	 * unregistered.
1003 	 * Thus there should be no chance to reuse unused kprobe.
1004 	 */
1005 	printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
1006 	return -EINVAL;
1007 }
1008 
1009 static void free_aggr_kprobe(struct kprobe *p)
1010 {
1011 	arch_remove_kprobe(p);
1012 	kfree(p);
1013 }
1014 
1015 static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
1016 {
1017 	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
1018 }
1019 #endif /* CONFIG_OPTPROBES */
1020 
1021 #ifdef CONFIG_KPROBES_ON_FTRACE
1022 static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
1023 	.func = kprobe_ftrace_handler,
1024 	.flags = FTRACE_OPS_FL_SAVE_REGS,
1025 };
1026 
1027 static struct ftrace_ops kprobe_ipmodify_ops __read_mostly = {
1028 	.func = kprobe_ftrace_handler,
1029 	.flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
1030 };
1031 
1032 static int kprobe_ipmodify_enabled;
1033 static int kprobe_ftrace_enabled;
1034 
1035 /* Must ensure p->addr is really on ftrace */
1036 static int prepare_kprobe(struct kprobe *p)
1037 {
1038 	if (!kprobe_ftrace(p))
1039 		return arch_prepare_kprobe(p);
1040 
1041 	return arch_prepare_kprobe_ftrace(p);
1042 }
1043 
1044 /* Caller must lock kprobe_mutex */
1045 static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
1046 			       int *cnt)
1047 {
1048 	int ret = 0;
1049 
1050 	ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0);
1051 	if (ret) {
1052 		pr_debug("Failed to arm kprobe-ftrace at %pS (%d)\n",
1053 			 p->addr, ret);
1054 		return ret;
1055 	}
1056 
1057 	if (*cnt == 0) {
1058 		ret = register_ftrace_function(ops);
1059 		if (ret) {
1060 			pr_debug("Failed to init kprobe-ftrace (%d)\n", ret);
1061 			goto err_ftrace;
1062 		}
1063 	}
1064 
1065 	(*cnt)++;
1066 	return ret;
1067 
1068 err_ftrace:
1069 	/*
1070 	 * At this point, since ops is not registered, we should be safe from
1071 	 * registering an empty filter.
1072 	 */
1073 	ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
1074 	return ret;
1075 }
1076 
1077 static int arm_kprobe_ftrace(struct kprobe *p)
1078 {
1079 	bool ipmodify = (p->post_handler != NULL);
1080 
1081 	return __arm_kprobe_ftrace(p,
1082 		ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
1083 		ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
1084 }
1085 
1086 /* Caller must lock kprobe_mutex */
1087 static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
1088 				  int *cnt)
1089 {
1090 	int ret = 0;
1091 
1092 	if (*cnt == 1) {
1093 		ret = unregister_ftrace_function(ops);
1094 		if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret))
1095 			return ret;
1096 	}
1097 
1098 	(*cnt)--;
1099 
1100 	ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
1101 	WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (%d)\n",
1102 		  p->addr, ret);
1103 	return ret;
1104 }
1105 
1106 static int disarm_kprobe_ftrace(struct kprobe *p)
1107 {
1108 	bool ipmodify = (p->post_handler != NULL);
1109 
1110 	return __disarm_kprobe_ftrace(p,
1111 		ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
1112 		ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
1113 }
1114 #else	/* !CONFIG_KPROBES_ON_FTRACE */
1115 static inline int prepare_kprobe(struct kprobe *p)
1116 {
1117 	return arch_prepare_kprobe(p);
1118 }
1119 
1120 static inline int arm_kprobe_ftrace(struct kprobe *p)
1121 {
1122 	return -ENODEV;
1123 }
1124 
1125 static inline int disarm_kprobe_ftrace(struct kprobe *p)
1126 {
1127 	return -ENODEV;
1128 }
1129 #endif
1130 
1131 /* Arm a kprobe with text_mutex */
1132 static int arm_kprobe(struct kprobe *kp)
1133 {
1134 	if (unlikely(kprobe_ftrace(kp)))
1135 		return arm_kprobe_ftrace(kp);
1136 
1137 	cpus_read_lock();
1138 	mutex_lock(&text_mutex);
1139 	__arm_kprobe(kp);
1140 	mutex_unlock(&text_mutex);
1141 	cpus_read_unlock();
1142 
1143 	return 0;
1144 }
1145 
1146 /* Disarm a kprobe with text_mutex */
1147 static int disarm_kprobe(struct kprobe *kp, bool reopt)
1148 {
1149 	if (unlikely(kprobe_ftrace(kp)))
1150 		return disarm_kprobe_ftrace(kp);
1151 
1152 	cpus_read_lock();
1153 	mutex_lock(&text_mutex);
1154 	__disarm_kprobe(kp, reopt);
1155 	mutex_unlock(&text_mutex);
1156 	cpus_read_unlock();
1157 
1158 	return 0;
1159 }
1160 
1161 /*
1162  * Aggregate handlers for multiple kprobes support - these handlers
1163  * take care of invoking the individual kprobe handlers on p->list
1164  */
1165 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
1166 {
1167 	struct kprobe *kp;
1168 
1169 	list_for_each_entry_rcu(kp, &p->list, list) {
1170 		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
1171 			set_kprobe_instance(kp);
1172 			if (kp->pre_handler(kp, regs))
1173 				return 1;
1174 		}
1175 		reset_kprobe_instance();
1176 	}
1177 	return 0;
1178 }
1179 NOKPROBE_SYMBOL(aggr_pre_handler);
1180 
1181 static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
1182 			      unsigned long flags)
1183 {
1184 	struct kprobe *kp;
1185 
1186 	list_for_each_entry_rcu(kp, &p->list, list) {
1187 		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
1188 			set_kprobe_instance(kp);
1189 			kp->post_handler(kp, regs, flags);
1190 			reset_kprobe_instance();
1191 		}
1192 	}
1193 }
1194 NOKPROBE_SYMBOL(aggr_post_handler);
1195 
1196 static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
1197 			      int trapnr)
1198 {
1199 	struct kprobe *cur = __this_cpu_read(kprobe_instance);
1200 
1201 	/*
1202 	 * if we faulted "during" the execution of a user specified
1203 	 * probe handler, invoke just that probe's fault handler
1204 	 */
1205 	if (cur && cur->fault_handler) {
1206 		if (cur->fault_handler(cur, regs, trapnr))
1207 			return 1;
1208 	}
1209 	return 0;
1210 }
1211 NOKPROBE_SYMBOL(aggr_fault_handler);
1212 
1213 /* Walks the list and increments nmissed count for multiprobe case */
1214 void kprobes_inc_nmissed_count(struct kprobe *p)
1215 {
1216 	struct kprobe *kp;
1217 	if (!kprobe_aggrprobe(p)) {
1218 		p->nmissed++;
1219 	} else {
1220 		list_for_each_entry_rcu(kp, &p->list, list)
1221 			kp->nmissed++;
1222 	}
1223 	return;
1224 }
1225 NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);
1226 
1227 static void recycle_rp_inst(struct kretprobe_instance *ri)
1228 {
1229 	struct kretprobe *rp = ri->rp;
1230 
1231 	/* remove the rp inst from the kretprobe_inst_table */
1232 	hlist_del(&ri->hlist);
1233 	INIT_HLIST_NODE(&ri->hlist);
1234 	if (likely(rp)) {
1235 		raw_spin_lock(&rp->lock);
1236 		hlist_add_head(&ri->hlist, &rp->free_instances);
1237 		raw_spin_unlock(&rp->lock);
1238 	} else
1239 		kfree_rcu(ri, rcu);
1240 }
1241 NOKPROBE_SYMBOL(recycle_rp_inst);
1242 
1243 static void kretprobe_hash_lock(struct task_struct *tsk,
1244 			 struct hlist_head **head, unsigned long *flags)
1245 __acquires(hlist_lock)
1246 {
1247 	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
1248 	raw_spinlock_t *hlist_lock;
1249 
1250 	*head = &kretprobe_inst_table[hash];
1251 	hlist_lock = kretprobe_table_lock_ptr(hash);
1252 	/*
1253 	 * Nested is a workaround that will soon not be needed.
1254 	 * There are other protections, of which lockdep is unaware, that
1255 	 * make sure the same lock is not taken on the same CPU.
1256 	 * Differentiate when it is taken in NMI context.
1257 	 */
1258 	raw_spin_lock_irqsave_nested(hlist_lock, *flags, !!in_nmi());
1259 }
1260 NOKPROBE_SYMBOL(kretprobe_hash_lock);
1261 
1262 static void kretprobe_table_lock(unsigned long hash,
1263 				 unsigned long *flags)
1264 __acquires(hlist_lock)
1265 {
1266 	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
1267 	/*
1268 	 * Nested is a workaround that will soon not be needed.
1269 	 * There are other protections, of which lockdep is unaware, that
1270 	 * make sure the same lock is not taken on the same CPU.
1271 	 * Differentiate when it is taken in NMI context.
1272 	 */
1273 	raw_spin_lock_irqsave_nested(hlist_lock, *flags, !!in_nmi());
1274 }
1275 NOKPROBE_SYMBOL(kretprobe_table_lock);
1276 
1277 static void kretprobe_hash_unlock(struct task_struct *tsk,
1278 			   unsigned long *flags)
1279 __releases(hlist_lock)
1280 {
1281 	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
1282 	raw_spinlock_t *hlist_lock;
1283 
1284 	hlist_lock = kretprobe_table_lock_ptr(hash);
1285 	raw_spin_unlock_irqrestore(hlist_lock, *flags);
1286 }
1287 NOKPROBE_SYMBOL(kretprobe_hash_unlock);
1288 
1289 static void kretprobe_table_unlock(unsigned long hash,
1290 				   unsigned long *flags)
1291 __releases(hlist_lock)
1292 {
1293 	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
1294 	raw_spin_unlock_irqrestore(hlist_lock, *flags);
1295 }
1296 NOKPROBE_SYMBOL(kretprobe_table_unlock);
1297 
1298 static struct kprobe kprobe_busy = {
1299 	.addr = (void *) get_kprobe,
1300 };
1301 
1302 void kprobe_busy_begin(void)
1303 {
1304 	struct kprobe_ctlblk *kcb;
1305 
1306 	preempt_disable();
1307 	__this_cpu_write(current_kprobe, &kprobe_busy);
1308 	kcb = get_kprobe_ctlblk();
1309 	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
1310 }
1311 
1312 void kprobe_busy_end(void)
1313 {
1314 	__this_cpu_write(current_kprobe, NULL);
1315 	preempt_enable();
1316 }
1317 
1318 /*
1319  * This function is called from finish_task_switch when task tk becomes dead,
1320  * so that we can recycle any function-return probe instances associated
1321  * with this task. These left over instances represent probed functions
1322  * that have been called but will never return.
1323  */
1324 void kprobe_flush_task(struct task_struct *tk)
1325 {
1326 	struct kretprobe_instance *ri;
1327 	struct hlist_head *head;
1328 	struct hlist_node *tmp;
1329 	unsigned long hash, flags = 0;
1330 
1331 	if (unlikely(!kprobes_initialized))
1332 		/* Early boot.  kretprobe_table_locks not yet initialized. */
1333 		return;
1334 
1335 	kprobe_busy_begin();
1336 
1337 	hash = hash_ptr(tk, KPROBE_HASH_BITS);
1338 	head = &kretprobe_inst_table[hash];
1339 	kretprobe_table_lock(hash, &flags);
1340 	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
1341 		if (ri->task == tk)
1342 			recycle_rp_inst(ri);
1343 	}
1344 	kretprobe_table_unlock(hash, &flags);
1345 
1346 	kprobe_busy_end();
1347 }
1348 NOKPROBE_SYMBOL(kprobe_flush_task);
1349 
1350 static inline void free_rp_inst(struct kretprobe *rp)
1351 {
1352 	struct kretprobe_instance *ri;
1353 	struct hlist_node *next;
1354 
1355 	hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
1356 		hlist_del(&ri->hlist);
1357 		kfree(ri);
1358 	}
1359 }
1360 
1361 static void cleanup_rp_inst(struct kretprobe *rp)
1362 {
1363 	unsigned long flags, hash;
1364 	struct kretprobe_instance *ri;
1365 	struct hlist_node *next;
1366 	struct hlist_head *head;
1367 
1368 	/* To avoid recursive kretprobe by NMI, set kprobe busy here */
1369 	kprobe_busy_begin();
1370 	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
1371 		kretprobe_table_lock(hash, &flags);
1372 		head = &kretprobe_inst_table[hash];
1373 		hlist_for_each_entry_safe(ri, next, head, hlist) {
1374 			if (ri->rp == rp)
1375 				ri->rp = NULL;
1376 		}
1377 		kretprobe_table_unlock(hash, &flags);
1378 	}
1379 	kprobe_busy_end();
1380 
1381 	free_rp_inst(rp);
1382 }
1383 NOKPROBE_SYMBOL(cleanup_rp_inst);
1384 
1385 /* Add the new probe to ap->list */
1386 static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
1387 {
1388 	if (p->post_handler)
1389 		unoptimize_kprobe(ap, true);	/* Fall back to normal kprobe */
1390 
1391 	list_add_rcu(&p->list, &ap->list);
1392 	if (p->post_handler && !ap->post_handler)
1393 		ap->post_handler = aggr_post_handler;
1394 
1395 	return 0;
1396 }
1397 
1398 /*
1399  * Fill in the required fields of the "manager kprobe". Replace the
1400  * earlier kprobe in the hlist with the manager kprobe
1401  */
1402 static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
1403 {
1404 	/* Copy p's insn slot to ap */
1405 	copy_kprobe(p, ap);
1406 	flush_insn_slot(ap);
1407 	ap->addr = p->addr;
1408 	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
1409 	ap->pre_handler = aggr_pre_handler;
1410 	ap->fault_handler = aggr_fault_handler;
1411 	/* We don't care about a kprobe which has gone. */
1412 	if (p->post_handler && !kprobe_gone(p))
1413 		ap->post_handler = aggr_post_handler;
1414 
1415 	INIT_LIST_HEAD(&ap->list);
1416 	INIT_HLIST_NODE(&ap->hlist);
1417 
1418 	list_add_rcu(&p->list, &ap->list);
1419 	hlist_replace_rcu(&p->hlist, &ap->hlist);
1420 }
1421 
1422 /*
1423  * This is the second or subsequent kprobe at the address - handle
1424  * the intricacies
1425  */
1426 static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
1427 {
1428 	int ret = 0;
1429 	struct kprobe *ap = orig_p;
1430 
1431 	cpus_read_lock();
1432 
1433 	/* For preparing optimization, jump_label_text_reserved() is called */
1434 	jump_label_lock();
1435 	mutex_lock(&text_mutex);
1436 
1437 	if (!kprobe_aggrprobe(orig_p)) {
1438 		/* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
1439 		ap = alloc_aggr_kprobe(orig_p);
1440 		if (!ap) {
1441 			ret = -ENOMEM;
1442 			goto out;
1443 		}
1444 		init_aggr_kprobe(ap, orig_p);
1445 	} else if (kprobe_unused(ap)) {
1446 		/* This probe is going to die. Rescue it */
1447 		ret = reuse_unused_kprobe(ap);
1448 		if (ret)
1449 			goto out;
1450 	}
1451 
1452 	if (kprobe_gone(ap)) {
1453 		/*
1454 		 * We are attempting to insert a new probe at the same location
1455 		 * as a probe in a module vaddr area which has already been
1456 		 * freed. So the instruction slot has already been
1457 		 * released. We need a new slot for the new probe.
1458 		 */
1459 		ret = arch_prepare_kprobe(ap);
1460 		if (ret)
1461 			/*
1462 			 * Even if we fail to allocate a new slot, we don't need
1463 			 * to free the aggr_probe. It will be used next time, or
1464 			 * freed by unregister_kprobe().
1465 			 */
1466 			goto out;
1467 
1468 		/* Prepare optimized instructions if possible. */
1469 		prepare_optimized_kprobe(ap);
1470 
1471 		/*
1472 		 * Clear gone flag to prevent allocating new slot again, and
1473 		 * set disabled flag because it is not armed yet.
1474 		 */
1475 		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
1476 			    | KPROBE_FLAG_DISABLED;
1477 	}
1478 
1479 	/* Copy ap's insn slot to p */
1480 	copy_kprobe(ap, p);
1481 	ret = add_new_kprobe(ap, p);
1482 
1483 out:
1484 	mutex_unlock(&text_mutex);
1485 	jump_label_unlock();
1486 	cpus_read_unlock();
1487 
1488 	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
1489 		ap->flags &= ~KPROBE_FLAG_DISABLED;
1490 		if (!kprobes_all_disarmed) {
1491 			/* Arm the breakpoint again. */
1492 			ret = arm_kprobe(ap);
1493 			if (ret) {
1494 				ap->flags |= KPROBE_FLAG_DISABLED;
1495 				list_del_rcu(&p->list);
1496 				synchronize_rcu();
1497 			}
1498 		}
1499 	}
1500 	return ret;
1501 }
1502 
1503 bool __weak arch_within_kprobe_blacklist(unsigned long addr)
1504 {
1505 	/* The __kprobes marked functions and entry code must not be probed */
1506 	return addr >= (unsigned long)__kprobes_text_start &&
1507 	       addr < (unsigned long)__kprobes_text_end;
1508 }
1509 
1510 static bool __within_kprobe_blacklist(unsigned long addr)
1511 {
1512 	struct kprobe_blacklist_entry *ent;
1513 
1514 	if (arch_within_kprobe_blacklist(addr))
1515 		return true;
1516 	/*
1517 	 * If there exists a kprobe_blacklist, verify and
1518 	 * fail any probe registration in the prohibited area
1519 	 */
1520 	list_for_each_entry(ent, &kprobe_blacklist, list) {
1521 		if (addr >= ent->start_addr && addr < ent->end_addr)
1522 			return true;
1523 	}
1524 	return false;
1525 }
1526 
1527 bool within_kprobe_blacklist(unsigned long addr)
1528 {
1529 	char symname[KSYM_NAME_LEN], *p;
1530 
1531 	if (__within_kprobe_blacklist(addr))
1532 		return true;
1533 
1534 	/* Check if the address is on a suffixed-symbol */
1535 	if (!lookup_symbol_name(addr, symname)) {
1536 		p = strchr(symname, '.');
1537 		if (!p)
1538 			return false;
1539 		*p = '\0';
1540 		addr = (unsigned long)kprobe_lookup_name(symname, 0);
1541 		if (addr)
1542 			return __within_kprobe_blacklist(addr);
1543 	}
1544 	return false;
1545 }
1546 
1547 /*
1548  * If we have a symbol_name argument, look it up and add the offset field
1549  * to it. This way, we can specify a relative address to a symbol.
1550  * This returns encoded errors if it fails to look up the symbol or is
1551  * given an invalid combination of parameters.
1552  */
1553 static kprobe_opcode_t *_kprobe_addr(kprobe_opcode_t *addr,
1554 			const char *symbol_name, unsigned int offset)
1555 {
1556 	if ((symbol_name && addr) || (!symbol_name && !addr))
1557 		goto invalid;
1558 
1559 	if (symbol_name) {
1560 		addr = kprobe_lookup_name(symbol_name, offset);
1561 		if (!addr)
1562 			return ERR_PTR(-ENOENT);
1563 	}
1564 
1565 	addr = (kprobe_opcode_t *)(((char *)addr) + offset);
1566 	if (addr)
1567 		return addr;
1568 
1569 invalid:
1570 	return ERR_PTR(-EINVAL);
1571 }
1572 
1573 static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
1574 {
1575 	return _kprobe_addr(p->addr, p->symbol_name, p->offset);
1576 }
1577 
1578 /* Check passed kprobe is valid and return kprobe in kprobe_table. */
1579 static struct kprobe *__get_valid_kprobe(struct kprobe *p)
1580 {
1581 	struct kprobe *ap, *list_p;
1582 
1583 	lockdep_assert_held(&kprobe_mutex);
1584 
1585 	ap = get_kprobe(p->addr);
1586 	if (unlikely(!ap))
1587 		return NULL;
1588 
1589 	if (p != ap) {
1590 		list_for_each_entry(list_p, &ap->list, list)
1591 			if (list_p == p)
1592 			/* kprobe p is a valid probe */
1593 				goto valid;
1594 		return NULL;
1595 	}
1596 valid:
1597 	return ap;
1598 }
1599 
1600 /* Return error if the kprobe is being re-registered */
1601 static inline int check_kprobe_rereg(struct kprobe *p)
1602 {
1603 	int ret = 0;
1604 
1605 	mutex_lock(&kprobe_mutex);
1606 	if (__get_valid_kprobe(p))
1607 		ret = -EINVAL;
1608 	mutex_unlock(&kprobe_mutex);
1609 
1610 	return ret;
1611 }
1612 
1613 int __weak arch_check_ftrace_location(struct kprobe *p)
1614 {
1615 	unsigned long ftrace_addr;
1616 
1617 	ftrace_addr = ftrace_location((unsigned long)p->addr);
1618 	if (ftrace_addr) {
1619 #ifdef CONFIG_KPROBES_ON_FTRACE
1620 		/* Given address is not on the instruction boundary */
1621 		if ((unsigned long)p->addr != ftrace_addr)
1622 			return -EILSEQ;
1623 		p->flags |= KPROBE_FLAG_FTRACE;
1624 #else	/* !CONFIG_KPROBES_ON_FTRACE */
1625 		return -EINVAL;
1626 #endif
1627 	}
1628 	return 0;
1629 }
1630 
1631 static int check_kprobe_address_safe(struct kprobe *p,
1632 				     struct module **probed_mod)
1633 {
1634 	int ret;
1635 
1636 	ret = arch_check_ftrace_location(p);
1637 	if (ret)
1638 		return ret;
1639 	jump_label_lock();
1640 	preempt_disable();
1641 
1642 	/* Ensure it is not in reserved area nor out of text */
1643 	if (!(core_kernel_text((unsigned long) p->addr) ||
1644 	    is_module_text_address((unsigned long) p->addr)) ||
1645 	    in_gate_area_no_mm((unsigned long) p->addr) ||
1646 	    within_kprobe_blacklist((unsigned long) p->addr) ||
1647 	    jump_label_text_reserved(p->addr, p->addr) ||
1648 	    static_call_text_reserved(p->addr, p->addr) ||
1649 	    find_bug((unsigned long)p->addr)) {
1650 		ret = -EINVAL;
1651 		goto out;
1652 	}
1653 
1654 	/* Check if we are probing a module */
1655 	*probed_mod = __module_text_address((unsigned long) p->addr);
1656 	if (*probed_mod) {
1657 		/*
1658 		 * We must hold a refcount of the probed module while updating
1659 		 * its code to prohibit unexpected unloading.
1660 		 */
1661 		if (unlikely(!try_module_get(*probed_mod))) {
1662 			ret = -ENOENT;
1663 			goto out;
1664 		}
1665 
1666 		/*
1667 		 * If the module has freed its .init.text, we can't insert
1668 		 * kprobes in there.
1669 		 */
1670 		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
1671 		    (*probed_mod)->state != MODULE_STATE_COMING) {
1672 			module_put(*probed_mod);
1673 			*probed_mod = NULL;
1674 			ret = -ENOENT;
1675 		}
1676 	}
1677 out:
1678 	preempt_enable();
1679 	jump_label_unlock();
1680 
1681 	return ret;
1682 }
1683 
1684 int register_kprobe(struct kprobe *p)
1685 {
1686 	int ret;
1687 	struct kprobe *old_p;
1688 	struct module *probed_mod;
1689 	kprobe_opcode_t *addr;
1690 
1691 	/* Adjust probe address from symbol */
1692 	addr = kprobe_addr(p);
1693 	if (IS_ERR(addr))
1694 		return PTR_ERR(addr);
1695 	p->addr = addr;
1696 
1697 	ret = check_kprobe_rereg(p);
1698 	if (ret)
1699 		return ret;
1700 
1701 	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
1702 	p->flags &= KPROBE_FLAG_DISABLED;
1703 	p->nmissed = 0;
1704 	INIT_LIST_HEAD(&p->list);
1705 
1706 	ret = check_kprobe_address_safe(p, &probed_mod);
1707 	if (ret)
1708 		return ret;
1709 
1710 	mutex_lock(&kprobe_mutex);
1711 
1712 	old_p = get_kprobe(p->addr);
1713 	if (old_p) {
1714 		/* Since this may unoptimize old_p, locking text_mutex. */
1715 		ret = register_aggr_kprobe(old_p, p);
1716 		goto out;
1717 	}
1718 
1719 	cpus_read_lock();
1720 	/* Prevent text modification */
1721 	mutex_lock(&text_mutex);
1722 	ret = prepare_kprobe(p);
1723 	mutex_unlock(&text_mutex);
1724 	cpus_read_unlock();
1725 	if (ret)
1726 		goto out;
1727 
1728 	INIT_HLIST_NODE(&p->hlist);
1729 	hlist_add_head_rcu(&p->hlist,
1730 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
1731 
1732 	if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
1733 		ret = arm_kprobe(p);
1734 		if (ret) {
1735 			hlist_del_rcu(&p->hlist);
1736 			synchronize_rcu();
1737 			goto out;
1738 		}
1739 	}
1740 
1741 	/* Try to optimize kprobe */
1742 	try_to_optimize_kprobe(p);
1743 out:
1744 	mutex_unlock(&kprobe_mutex);
1745 
1746 	if (probed_mod)
1747 		module_put(probed_mod);
1748 
1749 	return ret;
1750 }
1751 EXPORT_SYMBOL_GPL(register_kprobe);
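/*
 * A minimal usage sketch from a caller's point of view; the handler and the
 * probed symbol below are illustrative and not part of this file:
 *
 *	static int my_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("probe hit at %p\n", p->addr);
 *		return 0;	// 0: execute the probed instruction as usual
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "kernel_clone",
 *		.pre_handler	= my_pre,
 *	};
 *
 *	int err = register_kprobe(&my_kp);	// arms and may optimize the probe
 *	...
 *	unregister_kprobe(&my_kp);
 */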
1752 
1753 /* Check if all probes on the aggrprobe are disabled */
1754 static int aggr_kprobe_disabled(struct kprobe *ap)
1755 {
1756 	struct kprobe *kp;
1757 
1758 	lockdep_assert_held(&kprobe_mutex);
1759 
1760 	list_for_each_entry(kp, &ap->list, list)
1761 		if (!kprobe_disabled(kp))
1762 			/*
1763 			 * There is an active probe on the list.
1764 			 * We can't disable this ap.
1765 			 */
1766 			return 0;
1767 
1768 	return 1;
1769 }
1770 
1771 /* Disable one kprobe: Must be called with kprobe_mutex held */
1772 static struct kprobe *__disable_kprobe(struct kprobe *p)
1773 {
1774 	struct kprobe *orig_p;
1775 	int ret;
1776 
1777 	/* Get an original kprobe for return */
1778 	orig_p = __get_valid_kprobe(p);
1779 	if (unlikely(orig_p == NULL))
1780 		return ERR_PTR(-EINVAL);
1781 
1782 	if (!kprobe_disabled(p)) {
1783 		/* Disable probe if it is a child probe */
1784 		if (p != orig_p)
1785 			p->flags |= KPROBE_FLAG_DISABLED;
1786 
1787 		/* Try to disarm and disable this/parent probe */
1788 		if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
1789 			/*
1790 			 * Don't be lazy here.  Even if 'kprobes_all_disarmed'
1791 			 * is false, 'orig_p' might not have been armed yet.
1792 			 * Note arm_all_kprobes() __tries__ to arm all kprobes
1793 			 * on a best-effort basis.
1794 			 */
1795 			if (!kprobes_all_disarmed && !kprobe_disabled(orig_p)) {
1796 				ret = disarm_kprobe(orig_p, true);
1797 				if (ret) {
1798 					p->flags &= ~KPROBE_FLAG_DISABLED;
1799 					return ERR_PTR(ret);
1800 				}
1801 			}
1802 			orig_p->flags |= KPROBE_FLAG_DISABLED;
1803 		}
1804 	}
1805 
1806 	return orig_p;
1807 }
1808 
1809 /*
1810  * Unregister a kprobe without scheduler synchronization.
1811  */
1812 static int __unregister_kprobe_top(struct kprobe *p)
1813 {
1814 	struct kprobe *ap, *list_p;
1815 
1816 	/* Disable kprobe. This will disarm it if needed. */
1817 	ap = __disable_kprobe(p);
1818 	if (IS_ERR(ap))
1819 		return PTR_ERR(ap);
1820 
1821 	if (ap == p)
1822 		/*
1823 		 * This probe is an independent (and non-optimized) kprobe
1824 		 * (not an aggrprobe). Remove from the hash list.
1825 		 */
1826 		goto disarmed;
1827 
1828 	/* The following process expects this probe to be an aggrprobe */
1829 	WARN_ON(!kprobe_aggrprobe(ap));
1830 
1831 	if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
1832 		/*
1833 		 * !disarmed can happen if the probe is under delayed
1834 		 * unoptimizing.
1835 		 */
1836 		goto disarmed;
1837 	else {
1838 		/* If the probe being disabled has special handlers, update the aggrprobe */
1839 		if (p->post_handler && !kprobe_gone(p)) {
1840 			list_for_each_entry(list_p, &ap->list, list) {
1841 				if ((list_p != p) && (list_p->post_handler))
1842 					goto noclean;
1843 			}
1844 			/*
1845 			 * For the kprobe-on-ftrace case, we keep the
1846 			 * post_handler setting to identify this aggrprobe
1847 			 * armed with kprobe_ipmodify_ops.
1848 			 */
1849 			if (!kprobe_ftrace(ap))
1850 				ap->post_handler = NULL;
1851 		}
1852 noclean:
1853 		/*
1854 		 * Remove from the aggrprobe: this path will do nothing in
1855 		 * __unregister_kprobe_bottom().
1856 		 */
1857 		list_del_rcu(&p->list);
1858 		if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
1859 			/*
1860 			 * Try to optimize this probe again, because post
1861 			 * handler may have been changed.
1862 			 */
1863 			optimize_kprobe(ap);
1864 	}
1865 	return 0;
1866 
1867 disarmed:
1868 	hlist_del_rcu(&ap->hlist);
1869 	return 0;
1870 }
1871 
1872 static void __unregister_kprobe_bottom(struct kprobe *p)
1873 {
1874 	struct kprobe *ap;
1875 
1876 	if (list_empty(&p->list))
1877 		/* This is an independent kprobe */
1878 		arch_remove_kprobe(p);
1879 	else if (list_is_singular(&p->list)) {
1880 		/* This is the last child of an aggrprobe */
1881 		ap = list_entry(p->list.next, struct kprobe, list);
1882 		list_del(&p->list);
1883 		free_aggr_kprobe(ap);
1884 	}
1885 	/* Otherwise, do nothing. */
1886 }
1887 
1888 int register_kprobes(struct kprobe **kps, int num)
1889 {
1890 	int i, ret = 0;
1891 
1892 	if (num <= 0)
1893 		return -EINVAL;
1894 	for (i = 0; i < num; i++) {
1895 		ret = register_kprobe(kps[i]);
1896 		if (ret < 0) {
1897 			if (i > 0)
1898 				unregister_kprobes(kps, i);
1899 			break;
1900 		}
1901 	}
1902 	return ret;
1903 }
1904 EXPORT_SYMBOL_GPL(register_kprobes);
1905 
1906 void unregister_kprobe(struct kprobe *p)
1907 {
1908 	unregister_kprobes(&p, 1);
1909 }
1910 EXPORT_SYMBOL_GPL(unregister_kprobe);
1911 
1912 void unregister_kprobes(struct kprobe **kps, int num)
1913 {
1914 	int i;
1915 
1916 	if (num <= 0)
1917 		return;
1918 	mutex_lock(&kprobe_mutex);
1919 	for (i = 0; i < num; i++)
1920 		if (__unregister_kprobe_top(kps[i]) < 0)
1921 			kps[i]->addr = NULL;
1922 	mutex_unlock(&kprobe_mutex);
1923 
1924 	synchronize_rcu();
1925 	for (i = 0; i < num; i++)
1926 		if (kps[i]->addr)
1927 			__unregister_kprobe_bottom(kps[i]);
1928 }
1929 EXPORT_SYMBOL_GPL(unregister_kprobes);
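/*
 * Illustrative sketch (not part of this file): the array variants above are
 * all-or-nothing -- if any register_kprobe() call fails, the probes that
 * were registered so far are rolled back. The symbols and "example_*" names
 * below are assumptions made for the example only.
 */
#if 0	/* usage sketch */
static struct kprobe example_kp_a = { .symbol_name = "kernel_clone" };
static struct kprobe example_kp_b = { .symbol_name = "do_exit" };
static struct kprobe *example_kps[] = { &example_kp_a, &example_kp_b };

static int example_register_all(void)
{
	return register_kprobes(example_kps, ARRAY_SIZE(example_kps));
}

static void example_unregister_all(void)
{
	unregister_kprobes(example_kps, ARRAY_SIZE(example_kps));
}
#endif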
1930 
1931 int __weak kprobe_exceptions_notify(struct notifier_block *self,
1932 					unsigned long val, void *data)
1933 {
1934 	return NOTIFY_DONE;
1935 }
1936 NOKPROBE_SYMBOL(kprobe_exceptions_notify);
1937 
1938 static struct notifier_block kprobe_exceptions_nb = {
1939 	.notifier_call = kprobe_exceptions_notify,
1940 	.priority = 0x7fffffff /* we need to be notified first */
1941 };
1942 
1943 unsigned long __weak arch_deref_entry_point(void *entry)
1944 {
1945 	return (unsigned long)entry;
1946 }
1947 
1948 #ifdef CONFIG_KRETPROBES
1949 
1950 unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
1951 					     void *trampoline_address,
1952 					     void *frame_pointer)
1953 {
1954 	struct kretprobe_instance *ri = NULL, *last = NULL;
1955 	struct hlist_head *head;
1956 	struct hlist_node *tmp;
1957 	unsigned long flags;
1958 	kprobe_opcode_t *correct_ret_addr = NULL;
1959 	bool skipped = false;
1960 
1961 	kretprobe_hash_lock(current, &head, &flags);
1962 
1963 	/*
1964 	 * It is possible to have multiple instances associated with a given
1965 	 * task either because multiple functions in the call path have
1966 	 * return probes installed on them, and/or more than one
1967 	 * return probe was registered for a target function.
1968 	 *
1969 	 * We can handle this because:
1970 	 *     - instances are always pushed into the head of the list
1971 	 *     - when multiple return probes are registered for the same
1972 	 *	 function, the (chronologically) first instance's ret_addr
1973 	 *	 will be the real return address, and all the rest will
1974 	 *	 point to kretprobe_trampoline.
1975 	 */
1976 	hlist_for_each_entry(ri, head, hlist) {
1977 		if (ri->task != current)
1978 			/* another task is sharing our hash bucket */
1979 			continue;
1980 		/*
1981 		 * Return probes must be pushed onto this hash list in the
1982 		 * correct order (same as the return order) so that they can
1983 		 * be popped correctly. However, if we find one pushed in the
1984 		 * wrong order, it means we hit a function which should not
1985 		 * have been probed, because the out-of-order entry was pushed
1986 		 * while another kretprobe was itself being processed.
1987 		 */
1988 		if (ri->fp != frame_pointer) {
1989 			if (!skipped)
1990 				pr_warn("kretprobe is stacked incorrectly. Trying to fixup.\n");
1991 			skipped = true;
1992 			continue;
1993 		}
1994 
1995 		correct_ret_addr = ri->ret_addr;
1996 		if (skipped)
1997 			pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n",
1998 				ri->rp->kp.addr);
1999 
2000 		if (correct_ret_addr != trampoline_address)
2001 			/*
2002 			 * This is the real return address. Any other
2003 			 * instances associated with this task are for
2004 			 * other calls deeper on the call stack
2005 			 */
2006 			break;
2007 	}
2008 
2009 	BUG_ON(!correct_ret_addr || (correct_ret_addr == trampoline_address));
2010 	last = ri;
2011 
2012 	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
2013 		if (ri->task != current)
2014 			/* another task is sharing our hash bucket */
2015 			continue;
2016 		if (ri->fp != frame_pointer)
2017 			continue;
2018 
2019 		if (ri->rp && ri->rp->handler) {
2020 			struct kprobe *prev = kprobe_running();
2021 
2022 			__this_cpu_write(current_kprobe, &ri->rp->kp);
2023 			ri->ret_addr = correct_ret_addr;
2024 			ri->rp->handler(ri, regs);
2025 			__this_cpu_write(current_kprobe, prev);
2026 		}
2027 
2028 		recycle_rp_inst(ri);
2029 
2030 		if (ri == last)
2031 			break;
2032 	}
2033 
2034 	kretprobe_hash_unlock(current, &flags);
2035 
2036 	return (unsigned long)correct_ret_addr;
2037 }
2038 NOKPROBE_SYMBOL(__kretprobe_trampoline_handler)
2039 
2040 /*
2041  * This kprobe pre_handler is registered with every kretprobe. When the
2042  * probe is hit, it will set up the return probe.
2043  */
2044 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
2045 {
2046 	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
2047 	unsigned long hash, flags = 0;
2048 	struct kretprobe_instance *ri;
2049 
2050 	/* TODO: consider swapping the RA only after the last pre_handler has fired */
2051 	hash = hash_ptr(current, KPROBE_HASH_BITS);
2052 	/*
2053 	 * Nested is a workaround that will soon not be needed.
2054 	 * There are other protections, which lockdep is unaware of, that
2055 	 * make sure the same lock is not taken twice on the same CPU.
2056 	 */
2057 	raw_spin_lock_irqsave_nested(&rp->lock, flags, 1);
2058 	if (!hlist_empty(&rp->free_instances)) {
2059 		ri = hlist_entry(rp->free_instances.first,
2060 				struct kretprobe_instance, hlist);
2061 		hlist_del(&ri->hlist);
2062 		raw_spin_unlock_irqrestore(&rp->lock, flags);
2063 
2064 		ri->rp = rp;
2065 		ri->task = current;
2066 
2067 		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
2068 			raw_spin_lock_irqsave_nested(&rp->lock, flags, 1);
2069 			hlist_add_head(&ri->hlist, &rp->free_instances);
2070 			raw_spin_unlock_irqrestore(&rp->lock, flags);
2071 			return 0;
2072 		}
2073 
2074 		arch_prepare_kretprobe(ri, regs);
2075 
2076 		/* XXX(hch): why is there no hlist_move_head? */
2077 		INIT_HLIST_NODE(&ri->hlist);
2078 		kretprobe_table_lock(hash, &flags);
2079 		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
2080 		kretprobe_table_unlock(hash, &flags);
2081 	} else {
2082 		rp->nmissed++;
2083 		raw_spin_unlock_irqrestore(&rp->lock, flags);
2084 	}
2085 	return 0;
2086 }
2087 NOKPROBE_SYMBOL(pre_handler_kretprobe);
2088 
2089 bool __weak arch_kprobe_on_func_entry(unsigned long offset)
2090 {
2091 	return !offset;
2092 }
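/*
 * Illustrative sketch (not part of this file): an architecture whose
 * function entry spans more than the first instruction can override the
 * __weak helper above. The offset bound below is a made-up value used only
 * for illustration.
 */
#if 0	/* example override */
bool arch_kprobe_on_func_entry(unsigned long offset)
{
	return offset <= 0x10;	/* hypothetical entry window */
}
#endif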
2093 
2094 /**
2095  * kprobe_on_func_entry() -- check whether the given address is a function entry
2096  * @addr: Target address
2097  * @sym:  Target symbol name
2098  * @offset: The offset from the symbol or the address
2099  *
2100  * This checks whether the given @addr+@offset or @sym+@offset is on the
2101  * function entry address or not.
2102  * This returns 0 if it is the function entry, or -EINVAL if it is not.
2103  * It also returns -ENOENT if the symbol or address lookup fails.
2104  * The caller must pass either @addr or @sym (the other must be NULL),
2105  * or this returns -EINVAL.
2106  */
2107 int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
2108 {
2109 	kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);
2110 
2111 	if (IS_ERR(kp_addr))
2112 		return PTR_ERR(kp_addr);
2113 
2114 	if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset))
2115 		return -ENOENT;
2116 
2117 	if (!arch_kprobe_on_func_entry(offset))
2118 		return -EINVAL;
2119 
2120 	return 0;
2121 }
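/*
 * Illustrative sketch (not part of this file): validating a probe location
 * before registering a return probe. The symbol name is an assumption made
 * for the example only.
 */
#if 0	/* usage sketch */
static bool example_is_func_entry(void)
{
	/* 0: function entry; -EINVAL: not an entry; -ENOENT: lookup failed */
	return kprobe_on_func_entry(NULL, "kernel_clone", 0) == 0;
}
#endif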
2122 
2123 int register_kretprobe(struct kretprobe *rp)
2124 {
2125 	int ret;
2126 	struct kretprobe_instance *inst;
2127 	int i;
2128 	void *addr;
2129 
2130 	ret = kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset);
2131 	if (ret)
2132 		return ret;
2133 
2134 	/* If only rp->kp.addr is specified, check for re-registration of the kprobe */
2135 	if (rp->kp.addr && check_kprobe_rereg(&rp->kp))
2136 		return -EINVAL;
2137 
2138 	if (kretprobe_blacklist_size) {
2139 		addr = kprobe_addr(&rp->kp);
2140 		if (IS_ERR(addr))
2141 			return PTR_ERR(addr);
2142 
2143 		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
2144 			if (kretprobe_blacklist[i].addr == addr)
2145 				return -EINVAL;
2146 		}
2147 	}
2148 
2149 	if (rp->data_size > KRETPROBE_MAX_DATA_SIZE)
2150 		return -E2BIG;
2151 
2152 	rp->kp.pre_handler = pre_handler_kretprobe;
2153 	rp->kp.post_handler = NULL;
2154 	rp->kp.fault_handler = NULL;
2155 
2156 	/* Pre-allocate memory for max kretprobe instances */
2157 	if (rp->maxactive <= 0) {
2158 #ifdef CONFIG_PREEMPTION
2159 		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
2160 #else
2161 		rp->maxactive = num_possible_cpus();
2162 #endif
2163 	}
2164 	raw_spin_lock_init(&rp->lock);
2165 	INIT_HLIST_HEAD(&rp->free_instances);
2166 	for (i = 0; i < rp->maxactive; i++) {
2167 		inst = kmalloc(sizeof(struct kretprobe_instance) +
2168 			       rp->data_size, GFP_KERNEL);
2169 		if (inst == NULL) {
2170 			free_rp_inst(rp);
2171 			return -ENOMEM;
2172 		}
2173 		INIT_HLIST_NODE(&inst->hlist);
2174 		hlist_add_head(&inst->hlist, &rp->free_instances);
2175 	}
2176 
2177 	rp->nmissed = 0;
2178 	/* Establish function entry probe point */
2179 	ret = register_kprobe(&rp->kp);
2180 	if (ret != 0)
2181 		free_rp_inst(rp);
2182 	return ret;
2183 }
2184 EXPORT_SYMBOL_GPL(register_kretprobe);
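/*
 * Illustrative sketch (not part of this file): a kretprobe measuring the
 * time spent in a function and logging its return value. The probed symbol,
 * the "example_*" names and the per-instance data layout are assumptions
 * made for the example only.
 */
#if 0	/* usage sketch */
struct example_data {
	ktime_t entry_time;		/* stored in ri->data (data_size bytes) */
};

static int example_entry(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct example_data *d = (struct example_data *)ri->data;

	d->entry_time = ktime_get();
	return 0;			/* 0: track this instance */
}

static int example_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct example_data *d = (struct example_data *)ri->data;
	s64 delta = ktime_to_ns(ktime_sub(ktime_get(), d->entry_time));

	pr_info("%ps returned 0x%lx after %lld ns\n",
		(void *)ri->rp->kp.addr, regs_return_value(regs), delta);
	return 0;
}

static struct kretprobe example_rp = {
	.kp.symbol_name	= "kernel_clone",	/* assumed symbol name */
	.entry_handler	= example_entry,
	.handler	= example_ret,
	.data_size	= sizeof(struct example_data),
	.maxactive	= 20,			/* cap on concurrent instances */
};

/* register_kretprobe(&example_rp) / unregister_kretprobe(&example_rp) */
#endif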
2185 
2186 int register_kretprobes(struct kretprobe **rps, int num)
2187 {
2188 	int ret = 0, i;
2189 
2190 	if (num <= 0)
2191 		return -EINVAL;
2192 	for (i = 0; i < num; i++) {
2193 		ret = register_kretprobe(rps[i]);
2194 		if (ret < 0) {
2195 			if (i > 0)
2196 				unregister_kretprobes(rps, i);
2197 			break;
2198 		}
2199 	}
2200 	return ret;
2201 }
2202 EXPORT_SYMBOL_GPL(register_kretprobes);
2203 
2204 void unregister_kretprobe(struct kretprobe *rp)
2205 {
2206 	unregister_kretprobes(&rp, 1);
2207 }
2208 EXPORT_SYMBOL_GPL(unregister_kretprobe);
2209 
2210 void unregister_kretprobes(struct kretprobe **rps, int num)
2211 {
2212 	int i;
2213 
2214 	if (num <= 0)
2215 		return;
2216 	mutex_lock(&kprobe_mutex);
2217 	for (i = 0; i < num; i++)
2218 		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
2219 			rps[i]->kp.addr = NULL;
2220 	mutex_unlock(&kprobe_mutex);
2221 
2222 	synchronize_rcu();
2223 	for (i = 0; i < num; i++) {
2224 		if (rps[i]->kp.addr) {
2225 			__unregister_kprobe_bottom(&rps[i]->kp);
2226 			cleanup_rp_inst(rps[i]);
2227 		}
2228 	}
2229 }
2230 EXPORT_SYMBOL_GPL(unregister_kretprobes);
2231 
2232 #else /* CONFIG_KRETPROBES */
2233 int register_kretprobe(struct kretprobe *rp)
2234 {
2235 	return -ENOSYS;
2236 }
2237 EXPORT_SYMBOL_GPL(register_kretprobe);
2238 
2239 int register_kretprobes(struct kretprobe **rps, int num)
2240 {
2241 	return -ENOSYS;
2242 }
2243 EXPORT_SYMBOL_GPL(register_kretprobes);
2244 
2245 void unregister_kretprobe(struct kretprobe *rp)
2246 {
2247 }
2248 EXPORT_SYMBOL_GPL(unregister_kretprobe);
2249 
2250 void unregister_kretprobes(struct kretprobe **rps, int num)
2251 {
2252 }
2253 EXPORT_SYMBOL_GPL(unregister_kretprobes);
2254 
2255 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
2256 {
2257 	return 0;
2258 }
2259 NOKPROBE_SYMBOL(pre_handler_kretprobe);
2260 
2261 #endif /* CONFIG_KRETPROBES */
2262 
2263 /* Mark the kprobe as gone and remove its instruction buffer. */
2264 static void kill_kprobe(struct kprobe *p)
2265 {
2266 	struct kprobe *kp;
2267 
2268 	lockdep_assert_held(&kprobe_mutex);
2269 
2270 	if (WARN_ON_ONCE(kprobe_gone(p)))
2271 		return;
2272 
2273 	p->flags |= KPROBE_FLAG_GONE;
2274 	if (kprobe_aggrprobe(p)) {
2275 		/*
2276 		 * If this is an aggr_kprobe, we have to list all the
2277 		 * chained probes and mark them GONE.
2278 		 */
2279 		list_for_each_entry(kp, &p->list, list)
2280 			kp->flags |= KPROBE_FLAG_GONE;
2281 		p->post_handler = NULL;
2282 		kill_optimized_kprobe(p);
2283 	}
2284 	/*
2285 	 * Here, we can remove insn_slot safely, because no thread calls
2286 	 * the original probed function (which will be freed soon) any more.
2287 	 */
2288 	arch_remove_kprobe(p);
2289 
2290 	/*
2291 	 * The module is going away. We should disarm the kprobe which
2292 	 * is using ftrace, because the ftrace framework is still available
2293 	 * at the MODULE_STATE_GOING notification.
2294 	 */
2295 	if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed)
2296 		disarm_kprobe_ftrace(p);
2297 }
2298 
2299 /* Disable one kprobe */
2300 int disable_kprobe(struct kprobe *kp)
2301 {
2302 	int ret = 0;
2303 	struct kprobe *p;
2304 
2305 	mutex_lock(&kprobe_mutex);
2306 
2307 	/* Disable this kprobe */
2308 	p = __disable_kprobe(kp);
2309 	if (IS_ERR(p))
2310 		ret = PTR_ERR(p);
2311 
2312 	mutex_unlock(&kprobe_mutex);
2313 	return ret;
2314 }
2315 EXPORT_SYMBOL_GPL(disable_kprobe);
2316 
2317 /* Enable one kprobe */
2318 int enable_kprobe(struct kprobe *kp)
2319 {
2320 	int ret = 0;
2321 	struct kprobe *p;
2322 
2323 	mutex_lock(&kprobe_mutex);
2324 
2325 	/* Check whether specified probe is valid. */
2326 	p = __get_valid_kprobe(kp);
2327 	if (unlikely(p == NULL)) {
2328 		ret = -EINVAL;
2329 		goto out;
2330 	}
2331 
2332 	if (kprobe_gone(kp)) {
2333 		/* This kprobe has gone; we can't enable it. */
2334 		ret = -EINVAL;
2335 		goto out;
2336 	}
2337 
2338 	if (p != kp)
2339 		kp->flags &= ~KPROBE_FLAG_DISABLED;
2340 
2341 	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
2342 		p->flags &= ~KPROBE_FLAG_DISABLED;
2343 		ret = arm_kprobe(p);
2344 		if (ret) {
2345 			p->flags |= KPROBE_FLAG_DISABLED;
2346 			if (p != kp)
2347 				kp->flags |= KPROBE_FLAG_DISABLED;
2348 		}
2349 	}
2350 out:
2351 	mutex_unlock(&kprobe_mutex);
2352 	return ret;
2353 }
2354 EXPORT_SYMBOL_GPL(enable_kprobe);
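/*
 * Illustrative sketch (not part of this file): a registered kprobe can be
 * switched off and back on without unregistering it. "example_kp" is an
 * assumed probe that has already been registered elsewhere.
 */
#if 0	/* usage sketch */
static struct kprobe example_kp = { .symbol_name = "kernel_clone" };

static void example_toggle(bool on)
{
	int err = on ? enable_kprobe(&example_kp) : disable_kprobe(&example_kp);

	if (err)
		pr_warn("toggling example_kp failed: %d\n", err);
}
#endif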
2355 
2356 /* Callers must NOT call this in the usual path. This is only for critical cases. */
2357 void dump_kprobe(struct kprobe *kp)
2358 {
2359 	pr_err("Dumping kprobe:\n");
2360 	pr_err("Name: %s\nOffset: %x\nAddress: %pS\n",
2361 	       kp->symbol_name, kp->offset, kp->addr);
2362 }
2363 NOKPROBE_SYMBOL(dump_kprobe);
2364 
2365 int kprobe_add_ksym_blacklist(unsigned long entry)
2366 {
2367 	struct kprobe_blacklist_entry *ent;
2368 	unsigned long offset = 0, size = 0;
2369 
2370 	if (!kernel_text_address(entry) ||
2371 	    !kallsyms_lookup_size_offset(entry, &size, &offset))
2372 		return -EINVAL;
2373 
2374 	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
2375 	if (!ent)
2376 		return -ENOMEM;
2377 	ent->start_addr = entry;
2378 	ent->end_addr = entry + size;
2379 	INIT_LIST_HEAD(&ent->list);
2380 	list_add_tail(&ent->list, &kprobe_blacklist);
2381 
2382 	return (int)size;
2383 }
2384 
2385 /* Add all symbols in given area into kprobe blacklist */
2386 int kprobe_add_area_blacklist(unsigned long start, unsigned long end)
2387 {
2388 	unsigned long entry;
2389 	int ret = 0;
2390 
2391 	for (entry = start; entry < end; entry += ret) {
2392 		ret = kprobe_add_ksym_blacklist(entry);
2393 		if (ret < 0)
2394 			return ret;
2395 		if (ret == 0)	/* In case of alias symbol */
2396 			ret = 1;
2397 	}
2398 	return 0;
2399 }
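/*
 * Illustrative sketch (not part of this file): code is normally blacklisted
 * not by calling the helpers above directly but by annotation, which places
 * the symbol in the sections walked by populate_kprobe_blacklist() and
 * add_module_kprobe_blacklist(). "example_helper" is an assumed name.
 */
#if 0	/* annotation sketch */
static void example_helper(void)
{
	/* code that must never be probed, e.g. used from a probe handler */
}
NOKPROBE_SYMBOL(example_helper);	/* emits an entry in _kprobe_blacklist */
#endif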
2400 
2401 /* Remove all symbols in given area from kprobe blacklist */
2402 static void kprobe_remove_area_blacklist(unsigned long start, unsigned long end)
2403 {
2404 	struct kprobe_blacklist_entry *ent, *n;
2405 
2406 	list_for_each_entry_safe(ent, n, &kprobe_blacklist, list) {
2407 		if (ent->start_addr < start || ent->start_addr >= end)
2408 			continue;
2409 		list_del(&ent->list);
2410 		kfree(ent);
2411 	}
2412 }
2413 
2414 static void kprobe_remove_ksym_blacklist(unsigned long entry)
2415 {
2416 	kprobe_remove_area_blacklist(entry, entry + 1);
2417 }
2418 
2419 int __weak arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
2420 				   char *type, char *sym)
2421 {
2422 	return -ERANGE;
2423 }
2424 
2425 int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
2426 		       char *sym)
2427 {
2428 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
2429 	if (!kprobe_cache_get_kallsym(&kprobe_insn_slots, &symnum, value, type, sym))
2430 		return 0;
2431 #ifdef CONFIG_OPTPROBES
2432 	if (!kprobe_cache_get_kallsym(&kprobe_optinsn_slots, &symnum, value, type, sym))
2433 		return 0;
2434 #endif
2435 #endif
2436 	if (!arch_kprobe_get_kallsym(&symnum, value, type, sym))
2437 		return 0;
2438 	return -ERANGE;
2439 }
2440 
2441 int __init __weak arch_populate_kprobe_blacklist(void)
2442 {
2443 	return 0;
2444 }
2445 
2446 /*
2447  * Lookup and populate the kprobe_blacklist.
2448  *
2449  * Unlike the kretprobe blacklist, we'll need to determine
2450  * the range of addresses that belong to those functions,
2451  * since a kprobe need not necessarily be at the beginning
2452  * of a function.
2453  */
2454 static int __init populate_kprobe_blacklist(unsigned long *start,
2455 					     unsigned long *end)
2456 {
2457 	unsigned long entry;
2458 	unsigned long *iter;
2459 	int ret;
2460 
2461 	for (iter = start; iter < end; iter++) {
2462 		entry = arch_deref_entry_point((void *)*iter);
2463 		ret = kprobe_add_ksym_blacklist(entry);
2464 		if (ret == -EINVAL)
2465 			continue;
2466 		if (ret < 0)
2467 			return ret;
2468 	}
2469 
2470 	/* Symbols in __kprobes_text are blacklisted */
2471 	ret = kprobe_add_area_blacklist((unsigned long)__kprobes_text_start,
2472 					(unsigned long)__kprobes_text_end);
2473 	if (ret)
2474 		return ret;
2475 
2476 	/* Symbols in noinstr section are blacklisted */
2477 	ret = kprobe_add_area_blacklist((unsigned long)__noinstr_text_start,
2478 					(unsigned long)__noinstr_text_end);
2479 
2480 	return ret ? : arch_populate_kprobe_blacklist();
2481 }
2482 
2483 static void add_module_kprobe_blacklist(struct module *mod)
2484 {
2485 	unsigned long start, end;
2486 	int i;
2487 
2488 	if (mod->kprobe_blacklist) {
2489 		for (i = 0; i < mod->num_kprobe_blacklist; i++)
2490 			kprobe_add_ksym_blacklist(mod->kprobe_blacklist[i]);
2491 	}
2492 
2493 	start = (unsigned long)mod->kprobes_text_start;
2494 	if (start) {
2495 		end = start + mod->kprobes_text_size;
2496 		kprobe_add_area_blacklist(start, end);
2497 	}
2498 
2499 	start = (unsigned long)mod->noinstr_text_start;
2500 	if (start) {
2501 		end = start + mod->noinstr_text_size;
2502 		kprobe_add_area_blacklist(start, end);
2503 	}
2504 }
2505 
2506 static void remove_module_kprobe_blacklist(struct module *mod)
2507 {
2508 	unsigned long start, end;
2509 	int i;
2510 
2511 	if (mod->kprobe_blacklist) {
2512 		for (i = 0; i < mod->num_kprobe_blacklist; i++)
2513 			kprobe_remove_ksym_blacklist(mod->kprobe_blacklist[i]);
2514 	}
2515 
2516 	start = (unsigned long)mod->kprobes_text_start;
2517 	if (start) {
2518 		end = start + mod->kprobes_text_size;
2519 		kprobe_remove_area_blacklist(start, end);
2520 	}
2521 
2522 	start = (unsigned long)mod->noinstr_text_start;
2523 	if (start) {
2524 		end = start + mod->noinstr_text_size;
2525 		kprobe_remove_area_blacklist(start, end);
2526 	}
2527 }
2528 
2529 /* Module notifier callback, checking kprobes on the module */
2530 static int kprobes_module_callback(struct notifier_block *nb,
2531 				   unsigned long val, void *data)
2532 {
2533 	struct module *mod = data;
2534 	struct hlist_head *head;
2535 	struct kprobe *p;
2536 	unsigned int i;
2537 	int checkcore = (val == MODULE_STATE_GOING);
2538 
2539 	if (val == MODULE_STATE_COMING) {
2540 		mutex_lock(&kprobe_mutex);
2541 		add_module_kprobe_blacklist(mod);
2542 		mutex_unlock(&kprobe_mutex);
2543 	}
2544 	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
2545 		return NOTIFY_DONE;
2546 
2547 	/*
2548 	 * When MODULE_STATE_GOING is notified, both the module's .text and
2549 	 * .init.text sections will be freed. When MODULE_STATE_LIVE is
2550 	 * notified, only the .init.text section will be freed. We need to
2551 	 * disable any kprobes that have been inserted in those sections.
2552 	 */
2553 	mutex_lock(&kprobe_mutex);
2554 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2555 		head = &kprobe_table[i];
2556 		hlist_for_each_entry(p, head, hlist) {
2557 			if (kprobe_gone(p))
2558 				continue;
2559 
2560 			if (within_module_init((unsigned long)p->addr, mod) ||
2561 			    (checkcore &&
2562 			     within_module_core((unsigned long)p->addr, mod))) {
2563 				/*
2564 				 * The vaddr at which this probe is installed
2565 				 * will soon be vfreed but not synced to disk.
2566 				 * Hence, disarming the breakpoint isn't needed.
2567 				 *
2568 				 * Note, this will also move any optimized probes
2569 				 * that are pending to be removed from their
2570 				 * corresponding lists to the freeing_list and
2571 				 * will not be touched by the delayed
2572 				 * kprobe_optimizer work handler.
2573 				 */
2574 				kill_kprobe(p);
2575 			}
2576 		}
2577 	}
2578 	if (val == MODULE_STATE_GOING)
2579 		remove_module_kprobe_blacklist(mod);
2580 	mutex_unlock(&kprobe_mutex);
2581 	return NOTIFY_DONE;
2582 }
2583 
2584 static struct notifier_block kprobe_module_nb = {
2585 	.notifier_call = kprobes_module_callback,
2586 	.priority = 0
2587 };
2588 
2589 /* Markers of _kprobe_blacklist section */
2590 extern unsigned long __start_kprobe_blacklist[];
2591 extern unsigned long __stop_kprobe_blacklist[];
2592 
2593 void kprobe_free_init_mem(void)
2594 {
2595 	void *start = (void *)(&__init_begin);
2596 	void *end = (void *)(&__init_end);
2597 	struct hlist_head *head;
2598 	struct kprobe *p;
2599 	int i;
2600 
2601 	mutex_lock(&kprobe_mutex);
2602 
2603 	/* Kill all kprobes on initmem */
2604 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2605 		head = &kprobe_table[i];
2606 		hlist_for_each_entry(p, head, hlist) {
2607 			if (start <= (void *)p->addr && (void *)p->addr < end)
2608 				kill_kprobe(p);
2609 		}
2610 	}
2611 
2612 	mutex_unlock(&kprobe_mutex);
2613 }
2614 
2615 static int __init init_kprobes(void)
2616 {
2617 	int i, err = 0;
2618 
2619 	/* FIXME allocate the probe table, currently defined statically */
2620 	/* initialize all list heads */
2621 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2622 		INIT_HLIST_HEAD(&kprobe_table[i]);
2623 		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
2624 		raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
2625 	}
2626 
2627 	err = populate_kprobe_blacklist(__start_kprobe_blacklist,
2628 					__stop_kprobe_blacklist);
2629 	if (err) {
2630 		pr_err("kprobes: failed to populate blacklist: %d\n", err);
2631 		pr_err("Please take care of using kprobes.\n");
2632 	}
2633 
2634 	if (kretprobe_blacklist_size) {
2635 		/* lookup the function address from its name */
2636 		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
2637 			kretprobe_blacklist[i].addr =
2638 				kprobe_lookup_name(kretprobe_blacklist[i].name, 0);
2639 			if (!kretprobe_blacklist[i].addr)
2640 				printk("kretprobe: lookup failed: %s\n",
2641 				       kretprobe_blacklist[i].name);
2642 		}
2643 	}
2644 
2645 	/* By default, kprobes are armed */
2646 	kprobes_all_disarmed = false;
2647 
2648 #if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT)
2649 	/* Init kprobe_optinsn_slots for allocation */
2650 	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
2651 #endif
2652 
2653 	err = arch_init_kprobes();
2654 	if (!err)
2655 		err = register_die_notifier(&kprobe_exceptions_nb);
2656 	if (!err)
2657 		err = register_module_notifier(&kprobe_module_nb);
2658 
2659 	kprobes_initialized = (err == 0);
2660 
2661 	if (!err)
2662 		init_test_probes();
2663 	return err;
2664 }
2665 early_initcall(init_kprobes);
2666 
2667 #if defined(CONFIG_OPTPROBES)
2668 static int __init init_optprobes(void)
2669 {
2670 	/*
2671 	 * Enable kprobe optimization - this kicks the optimizer which
2672 	 * depends on synchronize_rcu_tasks() and ksoftirqd, which are
2673 	 * not spawned at early initcall time. So delay the optimization.
2674 	 */
2675 	optimize_all_kprobes();
2676 
2677 	return 0;
2678 }
2679 subsys_initcall(init_optprobes);
2680 #endif
2681 
2682 #ifdef CONFIG_DEBUG_FS
2683 static void report_probe(struct seq_file *pi, struct kprobe *p,
2684 		const char *sym, int offset, char *modname, struct kprobe *pp)
2685 {
2686 	char *kprobe_type;
2687 	void *addr = p->addr;
2688 
2689 	if (p->pre_handler == pre_handler_kretprobe)
2690 		kprobe_type = "r";
2691 	else
2692 		kprobe_type = "k";
2693 
2694 	if (!kallsyms_show_value(pi->file->f_cred))
2695 		addr = NULL;
2696 
2697 	if (sym)
2698 		seq_printf(pi, "%px  %s  %s+0x%x  %s ",
2699 			addr, kprobe_type, sym, offset,
2700 			(modname ? modname : " "));
2701 	else	/* try to use %pS */
2702 		seq_printf(pi, "%px  %s  %pS ",
2703 			addr, kprobe_type, p->addr);
2704 
2705 	if (!pp)
2706 		pp = p;
2707 	seq_printf(pi, "%s%s%s%s\n",
2708 		(kprobe_gone(p) ? "[GONE]" : ""),
2709 		((kprobe_disabled(p) && !kprobe_gone(p)) ?  "[DISABLED]" : ""),
2710 		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
2711 		(kprobe_ftrace(pp) ? "[FTRACE]" : ""));
2712 }
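/*
 * For reference, report_probe() above prints one line per probe in
 * <debugfs>/kprobes/list, roughly:
 *
 *   <address>  k|r  <symbol>+0x<offset>  [module] [GONE][DISABLED][OPTIMIZED][FTRACE]
 *
 * where "k" marks a plain kprobe and "r" a kretprobe, and the address is
 * replaced by NULL for readers not allowed to see kernel addresses.
 */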
2713 
2714 static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
2715 {
2716 	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
2717 }
2718 
2719 static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
2720 {
2721 	(*pos)++;
2722 	if (*pos >= KPROBE_TABLE_SIZE)
2723 		return NULL;
2724 	return pos;
2725 }
2726 
2727 static void kprobe_seq_stop(struct seq_file *f, void *v)
2728 {
2729 	/* Nothing to do */
2730 }
2731 
2732 static int show_kprobe_addr(struct seq_file *pi, void *v)
2733 {
2734 	struct hlist_head *head;
2735 	struct kprobe *p, *kp;
2736 	const char *sym = NULL;
2737 	unsigned int i = *(loff_t *) v;
2738 	unsigned long offset = 0;
2739 	char *modname, namebuf[KSYM_NAME_LEN];
2740 
2741 	head = &kprobe_table[i];
2742 	preempt_disable();
2743 	hlist_for_each_entry_rcu(p, head, hlist) {
2744 		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
2745 					&offset, &modname, namebuf);
2746 		if (kprobe_aggrprobe(p)) {
2747 			list_for_each_entry_rcu(kp, &p->list, list)
2748 				report_probe(pi, kp, sym, offset, modname, p);
2749 		} else
2750 			report_probe(pi, p, sym, offset, modname, NULL);
2751 	}
2752 	preempt_enable();
2753 	return 0;
2754 }
2755 
2756 static const struct seq_operations kprobes_sops = {
2757 	.start = kprobe_seq_start,
2758 	.next  = kprobe_seq_next,
2759 	.stop  = kprobe_seq_stop,
2760 	.show  = show_kprobe_addr
2761 };
2762 
2763 DEFINE_SEQ_ATTRIBUTE(kprobes);
2764 
2765 /* kprobes/blacklist -- shows which functions cannot be probed */
2766 static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
2767 {
2768 	mutex_lock(&kprobe_mutex);
2769 	return seq_list_start(&kprobe_blacklist, *pos);
2770 }
2771 
2772 static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
2773 {
2774 	return seq_list_next(v, &kprobe_blacklist, pos);
2775 }
2776 
2777 static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
2778 {
2779 	struct kprobe_blacklist_entry *ent =
2780 		list_entry(v, struct kprobe_blacklist_entry, list);
2781 
2782 	/*
2783 	 * If /proc/kallsyms is not showing kernel addresses, we won't
2784 	 * show them here either.
2785 	 */
2786 	if (!kallsyms_show_value(m->file->f_cred))
2787 		seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
2788 			   (void *)ent->start_addr);
2789 	else
2790 		seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
2791 			   (void *)ent->end_addr, (void *)ent->start_addr);
2792 	return 0;
2793 }
2794 
2795 static void kprobe_blacklist_seq_stop(struct seq_file *f, void *v)
2796 {
2797 	mutex_unlock(&kprobe_mutex);
2798 }
2799 
2800 static const struct seq_operations kprobe_blacklist_sops = {
2801 	.start = kprobe_blacklist_seq_start,
2802 	.next  = kprobe_blacklist_seq_next,
2803 	.stop  = kprobe_blacklist_seq_stop,
2804 	.show  = kprobe_blacklist_seq_show,
2805 };
2806 DEFINE_SEQ_ATTRIBUTE(kprobe_blacklist);
2807 
2808 static int arm_all_kprobes(void)
2809 {
2810 	struct hlist_head *head;
2811 	struct kprobe *p;
2812 	unsigned int i, total = 0, errors = 0;
2813 	int err, ret = 0;
2814 
2815 	mutex_lock(&kprobe_mutex);
2816 
2817 	/* If kprobes are armed, just return */
2818 	if (!kprobes_all_disarmed)
2819 		goto already_enabled;
2820 
2821 	/*
2822 	 * optimize_kprobe() called by arm_kprobe() checks
2823 	 * kprobes_all_disarmed, so clear kprobes_all_disarmed before
2824 	 * calling arm_kprobe().
2825 	 */
2826 	kprobes_all_disarmed = false;
2827 	/* Arming kprobes doesn't optimize kprobe itself */
2828 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2829 		head = &kprobe_table[i];
2830 		/* Arm all kprobes on a best-effort basis */
2831 		hlist_for_each_entry(p, head, hlist) {
2832 			if (!kprobe_disabled(p)) {
2833 				err = arm_kprobe(p);
2834 				if (err)  {
2835 					errors++;
2836 					ret = err;
2837 				}
2838 				total++;
2839 			}
2840 		}
2841 	}
2842 
2843 	if (errors)
2844 		pr_warn("Kprobes globally enabled, but failed to arm %d out of %d probes\n",
2845 			errors, total);
2846 	else
2847 		pr_info("Kprobes globally enabled\n");
2848 
2849 already_enabled:
2850 	mutex_unlock(&kprobe_mutex);
2851 	return ret;
2852 }
2853 
2854 static int disarm_all_kprobes(void)
2855 {
2856 	struct hlist_head *head;
2857 	struct kprobe *p;
2858 	unsigned int i, total = 0, errors = 0;
2859 	int err, ret = 0;
2860 
2861 	mutex_lock(&kprobe_mutex);
2862 
2863 	/* If kprobes are already disarmed, just return */
2864 	if (kprobes_all_disarmed) {
2865 		mutex_unlock(&kprobe_mutex);
2866 		return 0;
2867 	}
2868 
2869 	kprobes_all_disarmed = true;
2870 
2871 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2872 		head = &kprobe_table[i];
2873 		/* Disarm all kprobes on a best-effort basis */
2874 		hlist_for_each_entry(p, head, hlist) {
2875 			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
2876 				err = disarm_kprobe(p, false);
2877 				if (err) {
2878 					errors++;
2879 					ret = err;
2880 				}
2881 				total++;
2882 			}
2883 		}
2884 	}
2885 
2886 	if (errors)
2887 		pr_warn("Kprobes globally disabled, but failed to disarm %d out of %d probes\n",
2888 			errors, total);
2889 	else
2890 		pr_info("Kprobes globally disabled\n");
2891 
2892 	mutex_unlock(&kprobe_mutex);
2893 
2894 	/* Wait for the optimizer to finish disarming all kprobes */
2895 	wait_for_kprobe_optimizer();
2896 
2897 	return ret;
2898 }
2899 
2900 /*
2901  * XXX: The debugfs bool file interface doesn't allow for callbacks
2902  * when the bool state is switched. We can reuse that facility when
2903  * it becomes available.
2904  */
2905 static ssize_t read_enabled_file_bool(struct file *file,
2906 	       char __user *user_buf, size_t count, loff_t *ppos)
2907 {
2908 	char buf[3];
2909 
2910 	if (!kprobes_all_disarmed)
2911 		buf[0] = '1';
2912 	else
2913 		buf[0] = '0';
2914 	buf[1] = '\n';
2915 	buf[2] = 0x00;
2916 	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
2917 }
2918 
2919 static ssize_t write_enabled_file_bool(struct file *file,
2920 	       const char __user *user_buf, size_t count, loff_t *ppos)
2921 {
2922 	char buf[32];
2923 	size_t buf_size;
2924 	int ret = 0;
2925 
2926 	buf_size = min(count, (sizeof(buf)-1));
2927 	if (copy_from_user(buf, user_buf, buf_size))
2928 		return -EFAULT;
2929 
2930 	buf[buf_size] = '\0';
2931 	switch (buf[0]) {
2932 	case 'y':
2933 	case 'Y':
2934 	case '1':
2935 		ret = arm_all_kprobes();
2936 		break;
2937 	case 'n':
2938 	case 'N':
2939 	case '0':
2940 		ret = disarm_all_kprobes();
2941 		break;
2942 	default:
2943 		return -EINVAL;
2944 	}
2945 
2946 	if (ret)
2947 		return ret;
2948 
2949 	return count;
2950 }
2951 
2952 static const struct file_operations fops_kp = {
2953 	.read =         read_enabled_file_bool,
2954 	.write =        write_enabled_file_bool,
2955 	.llseek =	default_llseek,
2956 };
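/*
 * For reference, the "enabled" file created below accepts "1"/"y"/"Y" to arm
 * all kprobes and "0"/"n"/"N" to disarm them, and reads back as "1" or "0",
 * e.g. (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *   echo 0 > /sys/kernel/debug/kprobes/enabled
 */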
2957 
2958 static int __init debugfs_kprobe_init(void)
2959 {
2960 	struct dentry *dir;
2961 
2962 	dir = debugfs_create_dir("kprobes", NULL);
2963 
2964 	debugfs_create_file("list", 0400, dir, NULL, &kprobes_fops);
2965 
2966 	debugfs_create_file("enabled", 0600, dir, NULL, &fops_kp);
2967 
2968 	debugfs_create_file("blacklist", 0400, dir, NULL,
2969 			    &kprobe_blacklist_fops);
2970 
2971 	return 0;
2972 }
2973 
2974 late_initcall(debugfs_kprobe_init);
2975 #endif /* CONFIG_DEBUG_FS */
2976