1 /*
2  * User-space Probes (UProbes)
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17  *
18  * Copyright (C) IBM Corporation, 2008-2012
19  * Authors:
20  *	Srikar Dronamraju
21  *	Jim Keniston
22  * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
23  */
24 
25 #include <linux/kernel.h>
26 #include <linux/highmem.h>
27 #include <linux/pagemap.h>	/* read_mapping_page */
28 #include <linux/slab.h>
29 #include <linux/sched.h>
30 #include <linux/export.h>
31 #include <linux/rmap.h>		/* anon_vma_prepare */
32 #include <linux/mmu_notifier.h>	/* set_pte_at_notify */
33 #include <linux/swap.h>		/* try_to_free_swap */
34 #include <linux/ptrace.h>	/* user_enable_single_step */
35 #include <linux/kdebug.h>	/* notifier mechanism */
36 #include "../../mm/internal.h"	/* munlock_vma_page */
37 #include <linux/percpu-rwsem.h>
38 #include <linux/task_work.h>
39 #include <linux/shmem_fs.h>
40 
41 #include <linux/uprobes.h>
42 
43 #define UINSNS_PER_PAGE			(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
44 #define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE
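/*
 * Worked example (illustrative, architecture-dependent): with a 4 KiB page
 * and a 128-byte UPROBE_XOL_SLOT_BYTES, as x86 uses for instance, this gives
 * 4096 / 128 = 32 execute-out-of-line slots per page.
 */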
45 
46 static struct rb_root uprobes_tree = RB_ROOT;
47 /*
48  * Allows us to skip uprobe_mmap() if there are no uprobe events active
49  * at this time.  A fine-grained per-inode count would probably be better?
50  */
51 #define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)
52 
53 static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */
54 
55 #define UPROBES_HASH_SZ	13
56 /* serialize uprobe->pending_list */
57 static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
58 #define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
59 
60 static struct percpu_rw_semaphore dup_mmap_sem;
61 
62 /* Have a copy of original instruction */
63 #define UPROBE_COPY_INSN	0
64 
65 struct uprobe {
66 	struct rb_node		rb_node;	/* node in the rb tree */
67 	atomic_t		ref;
68 	struct rw_semaphore	register_rwsem;
69 	struct rw_semaphore	consumer_rwsem;
70 	struct list_head	pending_list;
71 	struct uprobe_consumer	*consumers;
72 	struct inode		*inode;		/* Also hold a ref to inode */
73 	loff_t			offset;
74 	unsigned long		flags;
75 
76 	/*
77 	 * The generic code assumes that it has two members of unknown type
78 	 * owned by the arch-specific code:
79 	 *
80 	 * 	insn -	copy_insn() saves the original instruction here for
81 	 *		arch_uprobe_analyze_insn().
82 	 *
83 	 *	ixol -	potentially modified instruction to execute out of
84 	 *		line, copied to xol_area by xol_get_insn_slot().
85 	 */
86 	struct arch_uprobe	arch;
87 };
88 
89 /*
90  * Execute out of line area: anonymous executable mapping installed
91  * by the probed task to execute the copy of the original instruction
92  * mangled by set_swbp().
93  *
94  * On a breakpoint hit, the thread contends for a slot.  It frees the
95  * slot after singlestep. Currently a fixed number of slots are
96  * allocated.
97  */
98 struct xol_area {
99 	wait_queue_head_t 		wq;		/* if all slots are busy */
100 	atomic_t 			slot_count;	/* number of in-use slots */
101 	unsigned long 			*bitmap;	/* 0 = free slot */
102 
103 	struct vm_special_mapping	xol_mapping;
104 	struct page 			*pages[2];
105 	/*
106 	 * We keep the vma's vm_start rather than a pointer to the vma
107 	 * itself.  The probed process or a naughty kernel module could make
108 	 * the vma go away, and we must handle that reasonably gracefully.
109 	 */
110 	unsigned long 			vaddr;		/* Page(s) of instruction slots */
111 };
112 
113 /*
114  * valid_vma: Verify if the specified vma is an executable vma
115  * Relax restrictions while unregistering: vm_flags might have
116  * changed after breakpoint was inserted.
117  *	- is_register: indicates if we are in register context.
118  *	- Return true if the specified vma is an
119  *	  executable, file-backed vma.
120  */
121 static bool valid_vma(struct vm_area_struct *vma, bool is_register)
122 {
123 	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;
124 
125 	if (is_register)
126 		flags |= VM_WRITE;
127 
128 	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
129 }
130 
131 static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
132 {
133 	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
134 }
135 
136 static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
137 {
138 	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
139 }
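/*
 * Worked example for the two helpers above (hypothetical numbers): for a vma
 * with vm_start = 0x400000 and vm_pgoff = 2 on a 4 KiB page size,
 * offset_to_vaddr(vma, 0x2010) = 0x400000 + 0x2010 - (2 << 12) = 0x400010,
 * and vaddr_to_offset(vma, 0x400010) = (2 << 12) + 0x10 = 0x2010.
 */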
140 
141 /**
142  * __replace_page - replace page in vma by new page.
143  * based on replace_page in mm/ksm.c
144  *
145  * @vma:      vma that holds the pte pointing to page
146  * @addr:     address the old @page is mapped at
147  * @old_page: the COWed page we are replacing by @new_page
148  * @new_page: the modified page we replace @old_page by
149  *
150  * Returns 0 on success, -EFAULT on failure.
151  */
152 static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
153 				struct page *old_page, struct page *new_page)
154 {
155 	struct mm_struct *mm = vma->vm_mm;
156 	spinlock_t *ptl;
157 	pte_t *ptep;
158 	int err;
159 	/* For mmu_notifiers */
160 	const unsigned long mmun_start = addr;
161 	const unsigned long mmun_end   = addr + PAGE_SIZE;
162 	struct mem_cgroup *memcg;
163 
164 	err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, &memcg,
165 			false);
166 	if (err)
167 		return err;
168 
169 	/* For try_to_free_swap() and munlock_vma_page() below */
170 	lock_page(old_page);
171 
172 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
173 	err = -EAGAIN;
174 	ptep = page_check_address(old_page, mm, addr, &ptl, 0);
175 	if (!ptep) {
176 		mem_cgroup_cancel_charge(new_page, memcg, false);
177 		goto unlock;
178 	}
179 
180 	get_page(new_page);
181 	page_add_new_anon_rmap(new_page, vma, addr, false);
182 	mem_cgroup_commit_charge(new_page, memcg, false, false);
183 	lru_cache_add_active_or_unevictable(new_page, vma);
184 
185 	if (!PageAnon(old_page)) {
186 		dec_mm_counter(mm, mm_counter_file(old_page));
187 		inc_mm_counter(mm, MM_ANONPAGES);
188 	}
189 
190 	flush_cache_page(vma, addr, pte_pfn(*ptep));
191 	ptep_clear_flush_notify(vma, addr, ptep);
192 	set_pte_at_notify(mm, addr, ptep, mk_pte(new_page, vma->vm_page_prot));
193 
194 	page_remove_rmap(old_page, false);
195 	if (!page_mapped(old_page))
196 		try_to_free_swap(old_page);
197 	pte_unmap_unlock(ptep, ptl);
198 
199 	if (vma->vm_flags & VM_LOCKED)
200 		munlock_vma_page(old_page);
201 	put_page(old_page);
202 
203 	err = 0;
204  unlock:
205 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
206 	unlock_page(old_page);
207 	return err;
208 }
209 
210 /**
211  * is_swbp_insn - check if instruction is breakpoint instruction.
212  * @insn: instruction to be checked.
213  * Default implementation of is_swbp_insn
214  * Returns true if @insn is a breakpoint instruction.
215  */
216 bool __weak is_swbp_insn(uprobe_opcode_t *insn)
217 {
218 	return *insn == UPROBE_SWBP_INSN;
219 }
220 
221 /**
222  * is_trap_insn - check if instruction is breakpoint instruction.
223  * @insn: instruction to be checked.
224  * Default implementation of is_trap_insn
225  * Returns true if @insn is a breakpoint instruction.
226  *
227  * This function is needed for the case where an architecture has multiple
228  * trap instructions (like powerpc).
229  */
230 bool __weak is_trap_insn(uprobe_opcode_t *insn)
231 {
232 	return is_swbp_insn(insn);
233 }
234 
235 static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
236 {
237 	void *kaddr = kmap_atomic(page);
238 	memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
239 	kunmap_atomic(kaddr);
240 }
241 
242 static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
243 {
244 	void *kaddr = kmap_atomic(page);
245 	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
246 	kunmap_atomic(kaddr);
247 }
248 
249 static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
250 {
251 	uprobe_opcode_t old_opcode;
252 	bool is_swbp;
253 
254 	/*
255 	 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
256 	 * We do not check if it is any other 'trap variant' which could
257 	 * be conditional trap instruction such as the one powerpc supports.
258 	 *
259 	 * The logic is that we do not care if the underlying instruction
260 	 * is a trap variant; uprobes always wins over any other (gdb)
261 	 * breakpoint.
262 	 */
263 	copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
264 	is_swbp = is_swbp_insn(&old_opcode);
265 
266 	if (is_swbp_insn(new_opcode)) {
267 		if (is_swbp)		/* register: already installed? */
268 			return 0;
269 	} else {
270 		if (!is_swbp)		/* unregister: was it changed by us? */
271 			return 0;
272 	}
273 
274 	return 1;
275 }
276 
277 /*
278  * NOTE:
279  * Expect the breakpoint instruction to be the smallest size instruction for
280  * the architecture. If an arch has variable-length instructions and the
281  * breakpoint instruction is not the smallest-length instruction
282  * supported by that architecture, then we need to modify is_trap_at_addr and
283  * uprobe_write_opcode accordingly. This would never be a problem for archs
284  * that have fixed length instructions.
285  *
286  * uprobe_write_opcode - write the opcode at a given virtual address.
287  * @mm: the probed process address space.
288  * @vaddr: the virtual address to store the opcode.
289  * @opcode: opcode to be written at @vaddr.
290  *
291  * Called with mm->mmap_sem held for write.
292  * Return 0 (success) or a negative errno.
293  */
294 int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
295 			uprobe_opcode_t opcode)
296 {
297 	struct page *old_page, *new_page;
298 	struct vm_area_struct *vma;
299 	int ret;
300 
301 retry:
302 	/* Read the page with vaddr into memory */
303 	ret = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &old_page,
304 			&vma);
305 	if (ret <= 0)
306 		return ret;
307 
308 	ret = verify_opcode(old_page, vaddr, &opcode);
309 	if (ret <= 0)
310 		goto put_old;
311 
312 	ret = anon_vma_prepare(vma);
313 	if (ret)
314 		goto put_old;
315 
316 	ret = -ENOMEM;
317 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
318 	if (!new_page)
319 		goto put_old;
320 
321 	__SetPageUptodate(new_page);
322 	copy_highpage(new_page, old_page);
323 	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
324 
325 	ret = __replace_page(vma, vaddr, old_page, new_page);
326 	put_page(new_page);
327 put_old:
328 	put_page(old_page);
329 
330 	if (unlikely(ret == -EAGAIN))
331 		goto retry;
332 	return ret;
333 }
334 
335 /**
336  * set_swbp - store breakpoint at a given address.
337  * @auprobe: arch specific probepoint information.
338  * @mm: the probed process address space.
339  * @vaddr: the virtual address to insert the opcode.
340  *
341  * For mm @mm, store the breakpoint instruction at @vaddr.
342  * Return 0 (success) or a negative errno.
343  */
344 int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
345 {
346 	return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
347 }
348 
349 /**
350  * set_orig_insn - Restore the original instruction.
351  * @mm: the probed process address space.
352  * @auprobe: arch specific probepoint information.
353  * @vaddr: the virtual address to insert the opcode.
354  *
355  * For mm @mm, restore the original opcode (opcode) at @vaddr.
356  * Return 0 (success) or a negative errno.
357  */
358 int __weak
359 set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
360 {
361 	return uprobe_write_opcode(mm, vaddr, *(uprobe_opcode_t *)&auprobe->insn);
362 }
363 
364 static struct uprobe *get_uprobe(struct uprobe *uprobe)
365 {
366 	atomic_inc(&uprobe->ref);
367 	return uprobe;
368 }
369 
370 static void put_uprobe(struct uprobe *uprobe)
371 {
372 	if (atomic_dec_and_test(&uprobe->ref))
373 		kfree(uprobe);
374 }
375 
376 static int match_uprobe(struct uprobe *l, struct uprobe *r)
377 {
378 	if (l->inode < r->inode)
379 		return -1;
380 
381 	if (l->inode > r->inode)
382 		return 1;
383 
384 	if (l->offset < r->offset)
385 		return -1;
386 
387 	if (l->offset > r->offset)
388 		return 1;
389 
390 	return 0;
391 }
392 
393 static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
394 {
395 	struct uprobe u = { .inode = inode, .offset = offset };
396 	struct rb_node *n = uprobes_tree.rb_node;
397 	struct uprobe *uprobe;
398 	int match;
399 
400 	while (n) {
401 		uprobe = rb_entry(n, struct uprobe, rb_node);
402 		match = match_uprobe(&u, uprobe);
403 		if (!match)
404 			return get_uprobe(uprobe);
405 
406 		if (match < 0)
407 			n = n->rb_left;
408 		else
409 			n = n->rb_right;
410 	}
411 	return NULL;
412 }
413 
414 /*
415  * Find a uprobe corresponding to a given inode:offset
416  * Acquires uprobes_treelock
417  */
418 static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
419 {
420 	struct uprobe *uprobe;
421 
422 	spin_lock(&uprobes_treelock);
423 	uprobe = __find_uprobe(inode, offset);
424 	spin_unlock(&uprobes_treelock);
425 
426 	return uprobe;
427 }
428 
429 static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
430 {
431 	struct rb_node **p = &uprobes_tree.rb_node;
432 	struct rb_node *parent = NULL;
433 	struct uprobe *u;
434 	int match;
435 
436 	while (*p) {
437 		parent = *p;
438 		u = rb_entry(parent, struct uprobe, rb_node);
439 		match = match_uprobe(uprobe, u);
440 		if (!match)
441 			return get_uprobe(u);
442 
443 		if (match < 0)
444 			p = &parent->rb_left;
445 		else
446 			p = &parent->rb_right;
447 
448 	}
449 
450 	u = NULL;
451 	rb_link_node(&uprobe->rb_node, parent, p);
452 	rb_insert_color(&uprobe->rb_node, &uprobes_tree);
453 	/* get access + creation ref */
454 	atomic_set(&uprobe->ref, 2);
455 
456 	return u;
457 }
458 
459 /*
460  * Acquire uprobes_treelock.
461  * Matching uprobe already exists in rbtree;
462  *	increment (access refcount) and return the matching uprobe.
463  *
464  * No matching uprobe; insert the uprobe in rb_tree;
465  *	get a double refcount (access + creation) and return NULL.
466  */
467 static struct uprobe *insert_uprobe(struct uprobe *uprobe)
468 {
469 	struct uprobe *u;
470 
471 	spin_lock(&uprobes_treelock);
472 	u = __insert_uprobe(uprobe);
473 	spin_unlock(&uprobes_treelock);
474 
475 	return u;
476 }
477 
478 static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
479 {
480 	struct uprobe *uprobe, *cur_uprobe;
481 
482 	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
483 	if (!uprobe)
484 		return NULL;
485 
486 	uprobe->inode = igrab(inode);
487 	uprobe->offset = offset;
488 	init_rwsem(&uprobe->register_rwsem);
489 	init_rwsem(&uprobe->consumer_rwsem);
490 
491 	/* add to uprobes_tree, sorted on inode:offset */
492 	cur_uprobe = insert_uprobe(uprobe);
493 	/* a uprobe exists for this inode:offset combination */
494 	if (cur_uprobe) {
495 		kfree(uprobe);
496 		uprobe = cur_uprobe;
497 		iput(inode);
498 	}
499 
500 	return uprobe;
501 }
502 
503 static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
504 {
505 	down_write(&uprobe->consumer_rwsem);
506 	uc->next = uprobe->consumers;
507 	uprobe->consumers = uc;
508 	up_write(&uprobe->consumer_rwsem);
509 }
510 
511 /*
512  * For uprobe @uprobe, delete the consumer @uc.
513  * Return true if @uc is deleted successfully,
514  * otherwise return false.
515  */
516 static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
517 {
518 	struct uprobe_consumer **con;
519 	bool ret = false;
520 
521 	down_write(&uprobe->consumer_rwsem);
522 	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
523 		if (*con == uc) {
524 			*con = uc->next;
525 			ret = true;
526 			break;
527 		}
528 	}
529 	up_write(&uprobe->consumer_rwsem);
530 
531 	return ret;
532 }
533 
534 static int __copy_insn(struct address_space *mapping, struct file *filp,
535 			void *insn, int nbytes, loff_t offset)
536 {
537 	struct page *page;
538 	/*
539 	 * Ensure that the page that has the original instruction is populated
540 	 * and in page-cache. If ->readpage == NULL it must be shmem_mapping(),
541 	 * see uprobe_register().
542 	 */
543 	if (mapping->a_ops->readpage)
544 		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
545 	else
546 		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
547 	if (IS_ERR(page))
548 		return PTR_ERR(page);
549 
550 	copy_from_page(page, offset, insn, nbytes);
551 	put_page(page);
552 
553 	return 0;
554 }
555 
556 static int copy_insn(struct uprobe *uprobe, struct file *filp)
557 {
558 	struct address_space *mapping = uprobe->inode->i_mapping;
559 	loff_t offs = uprobe->offset;
560 	void *insn = &uprobe->arch.insn;
561 	int size = sizeof(uprobe->arch.insn);
562 	int len, err = -EIO;
563 
564 	/* Copy only available bytes, -EIO if nothing was read */
565 	do {
566 		if (offs >= i_size_read(uprobe->inode))
567 			break;
568 
569 		len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
570 		err = __copy_insn(mapping, filp, insn, len, offs);
571 		if (err)
572 			break;
573 
574 		insn += len;
575 		offs += len;
576 		size -= len;
577 	} while (size);
578 
579 	return err;
580 }
581 
582 static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
583 				struct mm_struct *mm, unsigned long vaddr)
584 {
585 	int ret = 0;
586 
587 	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
588 		return ret;
589 
590 	/* TODO: move this into _register, until then we abuse this sem. */
591 	down_write(&uprobe->consumer_rwsem);
592 	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
593 		goto out;
594 
595 	ret = copy_insn(uprobe, file);
596 	if (ret)
597 		goto out;
598 
599 	ret = -ENOTSUPP;
600 	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
601 		goto out;
602 
603 	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
604 	if (ret)
605 		goto out;
606 
607 	/* uprobe_write_opcode() assumes we don't cross page boundary */
608 	BUG_ON((uprobe->offset & ~PAGE_MASK) +
609 			UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
610 
611 	smp_wmb(); /* pairs with rmb() in find_active_uprobe() */
612 	set_bit(UPROBE_COPY_INSN, &uprobe->flags);
613 
614  out:
615 	up_write(&uprobe->consumer_rwsem);
616 
617 	return ret;
618 }
619 
620 static inline bool consumer_filter(struct uprobe_consumer *uc,
621 				   enum uprobe_filter_ctx ctx, struct mm_struct *mm)
622 {
623 	return !uc->filter || uc->filter(uc, ctx, mm);
624 }
625 
626 static bool filter_chain(struct uprobe *uprobe,
627 			 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
628 {
629 	struct uprobe_consumer *uc;
630 	bool ret = false;
631 
632 	down_read(&uprobe->consumer_rwsem);
633 	for (uc = uprobe->consumers; uc; uc = uc->next) {
634 		ret = consumer_filter(uc, ctx, mm);
635 		if (ret)
636 			break;
637 	}
638 	up_read(&uprobe->consumer_rwsem);
639 
640 	return ret;
641 }
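/*
 * Illustrative sketch only: a consumer can narrow where breakpoints get
 * installed by supplying ->filter; consumer_filter() above treats a NULL
 * ->filter as "match everything".  The names below (example_filter,
 * example_target_mm) are hypothetical and not part of this file.
 */
#if 0	/* example, not built */
static struct mm_struct *example_target_mm;

static bool example_filter(struct uprobe_consumer *uc,
			   enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	/* install/keep the breakpoint only for one target mm */
	return mm == READ_ONCE(example_target_mm);
}
#endif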
642 
643 static int
644 install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
645 			struct vm_area_struct *vma, unsigned long vaddr)
646 {
647 	bool first_uprobe;
648 	int ret;
649 
650 	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
651 	if (ret)
652 		return ret;
653 
654 	/*
655 	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
656 	 * the task can hit this breakpoint right after __replace_page().
657 	 */
658 	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
659 	if (first_uprobe)
660 		set_bit(MMF_HAS_UPROBES, &mm->flags);
661 
662 	ret = set_swbp(&uprobe->arch, mm, vaddr);
663 	if (!ret)
664 		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
665 	else if (first_uprobe)
666 		clear_bit(MMF_HAS_UPROBES, &mm->flags);
667 
668 	return ret;
669 }
670 
671 static int
672 remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
673 {
674 	set_bit(MMF_RECALC_UPROBES, &mm->flags);
675 	return set_orig_insn(&uprobe->arch, mm, vaddr);
676 }
677 
678 static inline bool uprobe_is_active(struct uprobe *uprobe)
679 {
680 	return !RB_EMPTY_NODE(&uprobe->rb_node);
681 }
682 /*
683  * There could be threads that have already hit the breakpoint. They
684  * will recheck the current insn and restart if find_uprobe() fails.
685  * See find_active_uprobe().
686  */
687 static void delete_uprobe(struct uprobe *uprobe)
688 {
689 	if (WARN_ON(!uprobe_is_active(uprobe)))
690 		return;
691 
692 	spin_lock(&uprobes_treelock);
693 	rb_erase(&uprobe->rb_node, &uprobes_tree);
694 	spin_unlock(&uprobes_treelock);
695 	RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
696 	iput(uprobe->inode);
697 	put_uprobe(uprobe);
698 }
699 
700 struct map_info {
701 	struct map_info *next;
702 	struct mm_struct *mm;
703 	unsigned long vaddr;
704 };
705 
706 static inline struct map_info *free_map_info(struct map_info *info)
707 {
708 	struct map_info *next = info->next;
709 	kfree(info);
710 	return next;
711 }
712 
713 static struct map_info *
714 build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
715 {
716 	unsigned long pgoff = offset >> PAGE_SHIFT;
717 	struct vm_area_struct *vma;
718 	struct map_info *curr = NULL;
719 	struct map_info *prev = NULL;
720 	struct map_info *info;
721 	int more = 0;
722 
723  again:
724 	i_mmap_lock_read(mapping);
725 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
726 		if (!valid_vma(vma, is_register))
727 			continue;
728 
729 		if (!prev && !more) {
730 			/*
731 			 * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through
732 			 * reclaim. This is optimistic, no harm done if it fails.
733 			 */
734 			prev = kmalloc(sizeof(struct map_info),
735 					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
736 			if (prev)
737 				prev->next = NULL;
738 		}
739 		if (!prev) {
740 			more++;
741 			continue;
742 		}
743 
744 		if (!atomic_inc_not_zero(&vma->vm_mm->mm_users))
745 			continue;
746 
747 		info = prev;
748 		prev = prev->next;
749 		info->next = curr;
750 		curr = info;
751 
752 		info->mm = vma->vm_mm;
753 		info->vaddr = offset_to_vaddr(vma, offset);
754 	}
755 	i_mmap_unlock_read(mapping);
756 
757 	if (!more)
758 		goto out;
759 
760 	prev = curr;
761 	while (curr) {
762 		mmput(curr->mm);
763 		curr = curr->next;
764 	}
765 
766 	do {
767 		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
768 		if (!info) {
769 			curr = ERR_PTR(-ENOMEM);
770 			goto out;
771 		}
772 		info->next = prev;
773 		prev = info;
774 	} while (--more);
775 
776 	goto again;
777  out:
778 	while (prev)
779 		prev = free_map_info(prev);
780 	return curr;
781 }
782 
783 static int
784 register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
785 {
786 	bool is_register = !!new;
787 	struct map_info *info;
788 	int err = 0;
789 
790 	percpu_down_write(&dup_mmap_sem);
791 	info = build_map_info(uprobe->inode->i_mapping,
792 					uprobe->offset, is_register);
793 	if (IS_ERR(info)) {
794 		err = PTR_ERR(info);
795 		goto out;
796 	}
797 
798 	while (info) {
799 		struct mm_struct *mm = info->mm;
800 		struct vm_area_struct *vma;
801 
802 		if (err && is_register)
803 			goto free;
804 
805 		down_write(&mm->mmap_sem);
806 		vma = find_vma(mm, info->vaddr);
807 		if (!vma || !valid_vma(vma, is_register) ||
808 		    file_inode(vma->vm_file) != uprobe->inode)
809 			goto unlock;
810 
811 		if (vma->vm_start > info->vaddr ||
812 		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
813 			goto unlock;
814 
815 		if (is_register) {
816 			/* consult only the "caller", new consumer. */
817 			if (consumer_filter(new,
818 					UPROBE_FILTER_REGISTER, mm))
819 				err = install_breakpoint(uprobe, mm, vma, info->vaddr);
820 		} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
821 			if (!filter_chain(uprobe,
822 					UPROBE_FILTER_UNREGISTER, mm))
823 				err |= remove_breakpoint(uprobe, mm, info->vaddr);
824 		}
825 
826  unlock:
827 		up_write(&mm->mmap_sem);
828  free:
829 		mmput(mm);
830 		info = free_map_info(info);
831 	}
832  out:
833 	percpu_up_write(&dup_mmap_sem);
834 	return err;
835 }
836 
837 static int __uprobe_register(struct uprobe *uprobe, struct uprobe_consumer *uc)
838 {
839 	consumer_add(uprobe, uc);
840 	return register_for_each_vma(uprobe, uc);
841 }
842 
843 static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
844 {
845 	int err;
846 
847 	if (WARN_ON(!consumer_del(uprobe, uc)))
848 		return;
849 
850 	err = register_for_each_vma(uprobe, NULL);
851 	/* TODO: can't unregister? schedule a worker thread */
852 	if (!uprobe->consumers && !err)
853 		delete_uprobe(uprobe);
854 }
855 
856 /*
857  * uprobe_register - register a probe
858  * @inode: the file in which the probe has to be placed.
859  * @offset: offset from the start of the file.
860  * @uc: information on how to handle the probe.
861  *
862  * Apart from the access refcount, uprobe_register() takes a creation
863  * refcount (through alloc_uprobe) if and only if this @uprobe is getting
864  * inserted into the rbtree (i.e. first consumer for a @inode:@offset
865  * tuple).  Creation refcount stops uprobe_unregister from freeing the
866  * @uprobe even before the register operation is complete. Creation
867  * refcount is released when the last @uc for the @uprobe
868  * unregisters.
869  *
870  * Return errno if it cannot successfully install probes
871  * else return 0 (success)
872  */
873 int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
874 {
875 	struct uprobe *uprobe;
876 	int ret;
877 
878 	/* Uprobe must have at least one set consumer */
879 	if (!uc->handler && !uc->ret_handler)
880 		return -EINVAL;
881 
882 	/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
883 	if (!inode->i_mapping->a_ops->readpage && !shmem_mapping(inode->i_mapping))
884 		return -EIO;
885 	/* Racy, just to catch the obvious mistakes */
886 	if (offset > i_size_read(inode))
887 		return -EINVAL;
888 
889  retry:
890 	uprobe = alloc_uprobe(inode, offset);
891 	if (!uprobe)
892 		return -ENOMEM;
893 	/*
894 	 * We can race with uprobe_unregister()->delete_uprobe().
895 	 * Check uprobe_is_active() and retry if it is false.
896 	 */
897 	down_write(&uprobe->register_rwsem);
898 	ret = -EAGAIN;
899 	if (likely(uprobe_is_active(uprobe))) {
900 		ret = __uprobe_register(uprobe, uc);
901 		if (ret)
902 			__uprobe_unregister(uprobe, uc);
903 	}
904 	up_write(&uprobe->register_rwsem);
905 	put_uprobe(uprobe);
906 
907 	if (unlikely(ret == -EAGAIN))
908 		goto retry;
909 	return ret;
910 }
911 EXPORT_SYMBOL_GPL(uprobe_register);
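/*
 * Illustrative sketch only (hypothetical names, not part of this file): the
 * minimal shape of an in-kernel uprobe_register() caller.  Real users such
 * as trace_uprobe resolve the inode from a user-supplied path and keep the
 * consumer alive until uprobe_unregister().
 */
#if 0	/* example, not built */
static int example_handler(struct uprobe_consumer *uc, struct pt_regs *regs)
{
	/* runs in the probed task's context when the breakpoint is hit */
	return 0;	/* 0: keep the probe; UPROBE_HANDLER_REMOVE: drop it */
}

static struct uprobe_consumer example_consumer = {
	.handler = example_handler,
};

static int example_attach(struct inode *inode, loff_t offset)
{
	/* @offset is the file offset of the instruction, not a virtual address */
	return uprobe_register(inode, offset, &example_consumer);
}
#endif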
912 
913 /*
914  * uprobe_apply - add or remove the breakpoints of an already registered probe.
915  * @inode: the file in which the probe resides.
916  * @offset: offset from the start of the file.
917  * @uc: consumer which wants to add more or remove some breakpoints
918  * @add: add or remove the breakpoints
919  */
920 int uprobe_apply(struct inode *inode, loff_t offset,
921 			struct uprobe_consumer *uc, bool add)
922 {
923 	struct uprobe *uprobe;
924 	struct uprobe_consumer *con;
925 	int ret = -ENOENT;
926 
927 	uprobe = find_uprobe(inode, offset);
928 	if (WARN_ON(!uprobe))
929 		return ret;
930 
931 	down_write(&uprobe->register_rwsem);
932 	for (con = uprobe->consumers; con && con != uc ; con = con->next)
933 		;
934 	if (con)
935 		ret = register_for_each_vma(uprobe, add ? uc : NULL);
936 	up_write(&uprobe->register_rwsem);
937 	put_uprobe(uprobe);
938 
939 	return ret;
940 }
941 
942 /*
943  * uprobe_unregister - unregister an already registered probe.
944  * @inode: the file in which the probe has to be removed.
945  * @offset: offset from the start of the file.
946  * @uc: identify which probe if multiple probes are colocated.
947  */
948 void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
949 {
950 	struct uprobe *uprobe;
951 
952 	uprobe = find_uprobe(inode, offset);
953 	if (WARN_ON(!uprobe))
954 		return;
955 
956 	down_write(&uprobe->register_rwsem);
957 	__uprobe_unregister(uprobe, uc);
958 	up_write(&uprobe->register_rwsem);
959 	put_uprobe(uprobe);
960 }
961 EXPORT_SYMBOL_GPL(uprobe_unregister);
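/*
 * Continuing the illustrative sketch above (hypothetical names): teardown
 * must pass the same inode:offset and the same consumer used to register.
 */
#if 0	/* example, not built */
static void example_detach(struct inode *inode, loff_t offset)
{
	uprobe_unregister(inode, offset, &example_consumer);
}
#endif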
962 
963 static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
964 {
965 	struct vm_area_struct *vma;
966 	int err = 0;
967 
968 	down_read(&mm->mmap_sem);
969 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
970 		unsigned long vaddr;
971 		loff_t offset;
972 
973 		if (!valid_vma(vma, false) ||
974 		    file_inode(vma->vm_file) != uprobe->inode)
975 			continue;
976 
977 		offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
978 		if (uprobe->offset <  offset ||
979 		    uprobe->offset >= offset + vma->vm_end - vma->vm_start)
980 			continue;
981 
982 		vaddr = offset_to_vaddr(vma, uprobe->offset);
983 		err |= remove_breakpoint(uprobe, mm, vaddr);
984 	}
985 	up_read(&mm->mmap_sem);
986 
987 	return err;
988 }
989 
990 static struct rb_node *
991 find_node_in_range(struct inode *inode, loff_t min, loff_t max)
992 {
993 	struct rb_node *n = uprobes_tree.rb_node;
994 
995 	while (n) {
996 		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);
997 
998 		if (inode < u->inode) {
999 			n = n->rb_left;
1000 		} else if (inode > u->inode) {
1001 			n = n->rb_right;
1002 		} else {
1003 			if (max < u->offset)
1004 				n = n->rb_left;
1005 			else if (min > u->offset)
1006 				n = n->rb_right;
1007 			else
1008 				break;
1009 		}
1010 	}
1011 
1012 	return n;
1013 }
1014 
1015 /*
1016  * For a given range in vma, build a list of probes that need to be inserted.
1017  */
1018 static void build_probe_list(struct inode *inode,
1019 				struct vm_area_struct *vma,
1020 				unsigned long start, unsigned long end,
1021 				struct list_head *head)
1022 {
1023 	loff_t min, max;
1024 	struct rb_node *n, *t;
1025 	struct uprobe *u;
1026 
1027 	INIT_LIST_HEAD(head);
1028 	min = vaddr_to_offset(vma, start);
1029 	max = min + (end - start) - 1;
1030 
1031 	spin_lock(&uprobes_treelock);
1032 	n = find_node_in_range(inode, min, max);
1033 	if (n) {
1034 		for (t = n; t; t = rb_prev(t)) {
1035 			u = rb_entry(t, struct uprobe, rb_node);
1036 			if (u->inode != inode || u->offset < min)
1037 				break;
1038 			list_add(&u->pending_list, head);
1039 			get_uprobe(u);
1040 		}
1041 		for (t = n; (t = rb_next(t)); ) {
1042 			u = rb_entry(t, struct uprobe, rb_node);
1043 			if (u->inode != inode || u->offset > max)
1044 				break;
1045 			list_add(&u->pending_list, head);
1046 			get_uprobe(u);
1047 		}
1048 	}
1049 	spin_unlock(&uprobes_treelock);
1050 }
1051 
1052 /*
1053  * Called from mmap_region/vma_adjust with mm->mmap_sem acquired.
1054  *
1055  * Currently we ignore all errors and always return 0; the callers
1056  * can't handle the failure anyway.
1057  */
1058 int uprobe_mmap(struct vm_area_struct *vma)
1059 {
1060 	struct list_head tmp_list;
1061 	struct uprobe *uprobe, *u;
1062 	struct inode *inode;
1063 
1064 	if (no_uprobe_events() || !valid_vma(vma, true))
1065 		return 0;
1066 
1067 	inode = file_inode(vma->vm_file);
1068 	if (!inode)
1069 		return 0;
1070 
1071 	mutex_lock(uprobes_mmap_hash(inode));
1072 	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
1073 	/*
1074 	 * We can race with uprobe_unregister(), this uprobe can be already
1075 	 * removed. But in this case filter_chain() must return false, all
1076 	 * consumers have gone away.
1077 	 */
1078 	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
1079 		if (!fatal_signal_pending(current) &&
1080 		    filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
1081 			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
1082 			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
1083 		}
1084 		put_uprobe(uprobe);
1085 	}
1086 	mutex_unlock(uprobes_mmap_hash(inode));
1087 
1088 	return 0;
1089 }
1090 
1091 static bool
1092 vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1093 {
1094 	loff_t min, max;
1095 	struct inode *inode;
1096 	struct rb_node *n;
1097 
1098 	inode = file_inode(vma->vm_file);
1099 
1100 	min = vaddr_to_offset(vma, start);
1101 	max = min + (end - start) - 1;
1102 
1103 	spin_lock(&uprobes_treelock);
1104 	n = find_node_in_range(inode, min, max);
1105 	spin_unlock(&uprobes_treelock);
1106 
1107 	return !!n;
1108 }
1109 
1110 /*
1111  * Called in context of a munmap of a vma.
1112  */
1113 void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1114 {
1115 	if (no_uprobe_events() || !valid_vma(vma, false))
1116 		return;
1117 
1118 	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
1119 		return;
1120 
1121 	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
1122 	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
1123 		return;
1124 
1125 	if (vma_has_uprobes(vma, start, end))
1126 		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
1127 }
1128 
1129 /* Slot allocation for XOL */
1130 static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
1131 {
1132 	struct vm_area_struct *vma;
1133 	int ret;
1134 
1135 	if (down_write_killable(&mm->mmap_sem))
1136 		return -EINTR;
1137 
1138 	if (mm->uprobes_state.xol_area) {
1139 		ret = -EALREADY;
1140 		goto fail;
1141 	}
1142 
1143 	if (!area->vaddr) {
1144 		/* Try to map as high as possible, this is only a hint. */
1145 		area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
1146 						PAGE_SIZE, 0, 0);
1147 		if (area->vaddr & ~PAGE_MASK) {
1148 			ret = area->vaddr;
1149 			goto fail;
1150 		}
1151 	}
1152 
1153 	vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
1154 				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO,
1155 				&area->xol_mapping);
1156 	if (IS_ERR(vma)) {
1157 		ret = PTR_ERR(vma);
1158 		goto fail;
1159 	}
1160 
1161 	ret = 0;
1162 	smp_wmb();	/* pairs with get_xol_area() */
1163 	mm->uprobes_state.xol_area = area;
1164  fail:
1165 	up_write(&mm->mmap_sem);
1166 
1167 	return ret;
1168 }
1169 
1170 static struct xol_area *__create_xol_area(unsigned long vaddr)
1171 {
1172 	struct mm_struct *mm = current->mm;
1173 	uprobe_opcode_t insn = UPROBE_SWBP_INSN;
1174 	struct xol_area *area;
1175 
1176 	area = kmalloc(sizeof(*area), GFP_KERNEL);
1177 	if (unlikely(!area))
1178 		goto out;
1179 
1180 	area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
1181 	if (!area->bitmap)
1182 		goto free_area;
1183 
1184 	area->xol_mapping.name = "[uprobes]";
1185 	area->xol_mapping.fault = NULL;
1186 	area->xol_mapping.pages = area->pages;
1187 	area->pages[0] = alloc_page(GFP_HIGHUSER);
1188 	if (!area->pages[0])
1189 		goto free_bitmap;
1190 	area->pages[1] = NULL;
1191 
1192 	area->vaddr = vaddr;
1193 	init_waitqueue_head(&area->wq);
1194 	/* Reserve the 1st slot for get_trampoline_vaddr() */
1195 	set_bit(0, area->bitmap);
1196 	atomic_set(&area->slot_count, 1);
1197 	copy_to_page(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE);
1198 
1199 	if (!xol_add_vma(mm, area))
1200 		return area;
1201 
1202 	__free_page(area->pages[0]);
1203  free_bitmap:
1204 	kfree(area->bitmap);
1205  free_area:
1206 	kfree(area);
1207  out:
1208 	return NULL;
1209 }
1210 
1211 /*
1212  * get_xol_area - Allocate process's xol_area if necessary.
1213  * This area will be used for storing instructions for execution out of line.
1214  *
1215  * Returns the allocated area or NULL.
1216  */
1217 static struct xol_area *get_xol_area(void)
1218 {
1219 	struct mm_struct *mm = current->mm;
1220 	struct xol_area *area;
1221 
1222 	if (!mm->uprobes_state.xol_area)
1223 		__create_xol_area(0);
1224 
1225 	area = mm->uprobes_state.xol_area;
1226 	smp_read_barrier_depends();	/* pairs with wmb in xol_add_vma() */
1227 	return area;
1228 }
1229 
1230 /*
1231  * uprobe_clear_state - Free the area allocated for slots.
1232  */
1233 void uprobe_clear_state(struct mm_struct *mm)
1234 {
1235 	struct xol_area *area = mm->uprobes_state.xol_area;
1236 
1237 	if (!area)
1238 		return;
1239 
1240 	put_page(area->pages[0]);
1241 	kfree(area->bitmap);
1242 	kfree(area);
1243 }
1244 
1245 void uprobe_start_dup_mmap(void)
1246 {
1247 	percpu_down_read(&dup_mmap_sem);
1248 }
1249 
1250 void uprobe_end_dup_mmap(void)
1251 {
1252 	percpu_up_read(&dup_mmap_sem);
1253 }
1254 
1255 void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
1256 {
1257 	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
1258 		set_bit(MMF_HAS_UPROBES, &newmm->flags);
1259 		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
1260 		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
1261 	}
1262 }
1263 
1264 /*
1265  *  - search for a free slot.
1266  */
1267 static unsigned long xol_take_insn_slot(struct xol_area *area)
1268 {
1269 	unsigned long slot_addr;
1270 	int slot_nr;
1271 
1272 	do {
1273 		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
1274 		if (slot_nr < UINSNS_PER_PAGE) {
1275 			if (!test_and_set_bit(slot_nr, area->bitmap))
1276 				break;
1277 
1278 			slot_nr = UINSNS_PER_PAGE;
1279 			continue;
1280 		}
1281 		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
1282 	} while (slot_nr >= UINSNS_PER_PAGE);
1283 
1284 	slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
1285 	atomic_inc(&area->slot_count);
1286 
1287 	return slot_addr;
1288 }
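/*
 * Worked example for the slot arithmetic above (hypothetical numbers): with
 * 128-byte slots, taking slot_nr = 3 returns
 * area->vaddr + 3 * 128 = area->vaddr + 0x180.
 */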
1289 
1290 /*
1291  * xol_get_insn_slot - allocate a slot for xol.
1292  * Returns the allocated slot address or 0.
1293  */
1294 static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
1295 {
1296 	struct xol_area *area;
1297 	unsigned long xol_vaddr;
1298 
1299 	area = get_xol_area();
1300 	if (!area)
1301 		return 0;
1302 
1303 	xol_vaddr = xol_take_insn_slot(area);
1304 	if (unlikely(!xol_vaddr))
1305 		return 0;
1306 
1307 	arch_uprobe_copy_ixol(area->pages[0], xol_vaddr,
1308 			      &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
1309 
1310 	return xol_vaddr;
1311 }
1312 
1313 /*
1314  * xol_free_insn_slot - If slot was earlier allocated by
1315  * @xol_get_insn_slot(), make the slot available for
1316  * subsequent requests.
1317  */
1318 static void xol_free_insn_slot(struct task_struct *tsk)
1319 {
1320 	struct xol_area *area;
1321 	unsigned long vma_end;
1322 	unsigned long slot_addr;
1323 
1324 	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
1325 		return;
1326 
1327 	slot_addr = tsk->utask->xol_vaddr;
1328 	if (unlikely(!slot_addr))
1329 		return;
1330 
1331 	area = tsk->mm->uprobes_state.xol_area;
1332 	vma_end = area->vaddr + PAGE_SIZE;
1333 	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
1334 		unsigned long offset;
1335 		int slot_nr;
1336 
1337 		offset = slot_addr - area->vaddr;
1338 		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
1339 		if (slot_nr >= UINSNS_PER_PAGE)
1340 			return;
1341 
1342 		clear_bit(slot_nr, area->bitmap);
1343 		atomic_dec(&area->slot_count);
1344 		smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
1345 		if (waitqueue_active(&area->wq))
1346 			wake_up(&area->wq);
1347 
1348 		tsk->utask->xol_vaddr = 0;
1349 	}
1350 }
1351 
1352 void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
1353 				  void *src, unsigned long len)
1354 {
1355 	/* Initialize the slot */
1356 	copy_to_page(page, vaddr, src, len);
1357 
1358 	/*
1359 	 * We probably need flush_icache_user_range() but it needs vma.
1360 	 * This should work on most of architectures by default. If
1361 	 * architecture needs to do something different it can define
1362 	 * its own version of the function.
1363 	 */
1364 	flush_dcache_page(page);
1365 }
1366 
1367 /**
1368  * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
1369  * @regs: Reflects the saved state of the task after it has hit a breakpoint
1370  * instruction.
1371  * Return the address of the breakpoint instruction.
1372  */
1373 unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
1374 {
1375 	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
1376 }
1377 
1378 unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
1379 {
1380 	struct uprobe_task *utask = current->utask;
1381 
1382 	if (unlikely(utask && utask->active_uprobe))
1383 		return utask->vaddr;
1384 
1385 	return instruction_pointer(regs);
1386 }
1387 
1388 static struct return_instance *free_ret_instance(struct return_instance *ri)
1389 {
1390 	struct return_instance *next = ri->next;
1391 	put_uprobe(ri->uprobe);
1392 	kfree(ri);
1393 	return next;
1394 }
1395 
1396 /*
1397  * Called with no locks held.
1398  * Called in context of an exiting or an exec-ing thread.
1399  */
1400 void uprobe_free_utask(struct task_struct *t)
1401 {
1402 	struct uprobe_task *utask = t->utask;
1403 	struct return_instance *ri;
1404 
1405 	if (!utask)
1406 		return;
1407 
1408 	if (utask->active_uprobe)
1409 		put_uprobe(utask->active_uprobe);
1410 
1411 	ri = utask->return_instances;
1412 	while (ri)
1413 		ri = free_ret_instance(ri);
1414 
1415 	xol_free_insn_slot(t);
1416 	kfree(utask);
1417 	t->utask = NULL;
1418 }
1419 
1420 /*
1421  * Allocate a uprobe_task object for the task if necessary.
1422  * Called when the thread hits a breakpoint.
1423  *
1424  * Returns:
1425  * - pointer to new uprobe_task on success
1426  * - NULL otherwise
1427  */
1428 static struct uprobe_task *get_utask(void)
1429 {
1430 	if (!current->utask)
1431 		current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
1432 	return current->utask;
1433 }
1434 
1435 static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
1436 {
1437 	struct uprobe_task *n_utask;
1438 	struct return_instance **p, *o, *n;
1439 
1440 	n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
1441 	if (!n_utask)
1442 		return -ENOMEM;
1443 	t->utask = n_utask;
1444 
1445 	p = &n_utask->return_instances;
1446 	for (o = o_utask->return_instances; o; o = o->next) {
1447 		n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
1448 		if (!n)
1449 			return -ENOMEM;
1450 
1451 		*n = *o;
1452 		get_uprobe(n->uprobe);
1453 		n->next = NULL;
1454 
1455 		*p = n;
1456 		p = &n->next;
1457 		n_utask->depth++;
1458 	}
1459 
1460 	return 0;
1461 }
1462 
1463 static void uprobe_warn(struct task_struct *t, const char *msg)
1464 {
1465 	pr_warn("uprobe: %s:%d failed to %s\n",
1466 			current->comm, current->pid, msg);
1467 }
1468 
1469 static void dup_xol_work(struct callback_head *work)
1470 {
1471 	if (current->flags & PF_EXITING)
1472 		return;
1473 
1474 	if (!__create_xol_area(current->utask->dup_xol_addr) &&
1475 			!fatal_signal_pending(current))
1476 		uprobe_warn(current, "dup xol area");
1477 }
1478 
1479 /*
1480  * Called in context of a new clone/fork from copy_process.
1481  */
1482 void uprobe_copy_process(struct task_struct *t, unsigned long flags)
1483 {
1484 	struct uprobe_task *utask = current->utask;
1485 	struct mm_struct *mm = current->mm;
1486 	struct xol_area *area;
1487 
1488 	t->utask = NULL;
1489 
1490 	if (!utask || !utask->return_instances)
1491 		return;
1492 
1493 	if (mm == t->mm && !(flags & CLONE_VFORK))
1494 		return;
1495 
1496 	if (dup_utask(t, utask))
1497 		return uprobe_warn(t, "dup ret instances");
1498 
1499 	/* The task can fork() after dup_xol_work() fails */
1500 	area = mm->uprobes_state.xol_area;
1501 	if (!area)
1502 		return uprobe_warn(t, "dup xol area");
1503 
1504 	if (mm == t->mm)
1505 		return;
1506 
1507 	t->utask->dup_xol_addr = area->vaddr;
1508 	init_task_work(&t->utask->dup_xol_work, dup_xol_work);
1509 	task_work_add(t, &t->utask->dup_xol_work, true);
1510 }
1511 
1512 /*
1513  * The current area->vaddr notion assumes the trampoline address is always
1514  * equal to area->vaddr.
1515  *
1516  * Returns -1 in case the xol_area is not allocated.
1517  */
1518 static unsigned long get_trampoline_vaddr(void)
1519 {
1520 	struct xol_area *area;
1521 	unsigned long trampoline_vaddr = -1;
1522 
1523 	area = current->mm->uprobes_state.xol_area;
1524 	smp_read_barrier_depends();
1525 	if (area)
1526 		trampoline_vaddr = area->vaddr;
1527 
1528 	return trampoline_vaddr;
1529 }
1530 
1531 static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
1532 					struct pt_regs *regs)
1533 {
1534 	struct return_instance *ri = utask->return_instances;
1535 	enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;
1536 
1537 	while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
1538 		ri = free_ret_instance(ri);
1539 		utask->depth--;
1540 	}
1541 	utask->return_instances = ri;
1542 }
1543 
1544 static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
1545 {
1546 	struct return_instance *ri;
1547 	struct uprobe_task *utask;
1548 	unsigned long orig_ret_vaddr, trampoline_vaddr;
1549 	bool chained;
1550 
1551 	if (!get_xol_area())
1552 		return;
1553 
1554 	utask = get_utask();
1555 	if (!utask)
1556 		return;
1557 
1558 	if (utask->depth >= MAX_URETPROBE_DEPTH) {
1559 		printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
1560 				" nestedness limit pid/tgid=%d/%d\n",
1561 				current->pid, current->tgid);
1562 		return;
1563 	}
1564 
1565 	ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
1566 	if (!ri)
1567 		return;
1568 
1569 	trampoline_vaddr = get_trampoline_vaddr();
1570 	orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
1571 	if (orig_ret_vaddr == -1)
1572 		goto fail;
1573 
1574 	/* drop the entries invalidated by longjmp() */
1575 	chained = (orig_ret_vaddr == trampoline_vaddr);
1576 	cleanup_return_instances(utask, chained, regs);
1577 
1578 	/*
1579 	 * We don't want to keep the trampoline address on the stack; rather, keep
1580 	 * the original return address of the first caller through all the
1581 	 * consequent instances. This also makes breakpoint unwrapping easier.
1582 	 */
1583 	if (chained) {
1584 		if (!utask->return_instances) {
1585 			/*
1586 			 * This situation is not possible. Likely we have an
1587 			 * attack from user-space.
1588 			 */
1589 			uprobe_warn(current, "handle tail call");
1590 			goto fail;
1591 		}
1592 		orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
1593 	}
1594 
1595 	ri->uprobe = get_uprobe(uprobe);
1596 	ri->func = instruction_pointer(regs);
1597 	ri->stack = user_stack_pointer(regs);
1598 	ri->orig_ret_vaddr = orig_ret_vaddr;
1599 	ri->chained = chained;
1600 
1601 	utask->depth++;
1602 	ri->next = utask->return_instances;
1603 	utask->return_instances = ri;
1604 
1605 	return;
1606  fail:
1607 	kfree(ri);
1608 }
1609 
1610 /* Prepare to single-step probed instruction out of line. */
1611 static int
1612 pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
1613 {
1614 	struct uprobe_task *utask;
1615 	unsigned long xol_vaddr;
1616 	int err;
1617 
1618 	utask = get_utask();
1619 	if (!utask)
1620 		return -ENOMEM;
1621 
1622 	xol_vaddr = xol_get_insn_slot(uprobe);
1623 	if (!xol_vaddr)
1624 		return -ENOMEM;
1625 
1626 	utask->xol_vaddr = xol_vaddr;
1627 	utask->vaddr = bp_vaddr;
1628 
1629 	err = arch_uprobe_pre_xol(&uprobe->arch, regs);
1630 	if (unlikely(err)) {
1631 		xol_free_insn_slot(current);
1632 		return err;
1633 	}
1634 
1635 	utask->active_uprobe = uprobe;
1636 	utask->state = UTASK_SSTEP;
1637 	return 0;
1638 }
1639 
1640 /*
1641  * If we are singlestepping, then ensure this thread is not connected to
1642  * non-fatal signals until completion of singlestep.  When the xol insn itself
1643  * triggers the signal, restart the original insn even if the task is
1644  * already SIGKILL'ed (since coredump should report the correct ip).  This
1645  * is even more important if the task has a handler for SIGSEGV/etc: the
1646  * _same_ instruction should be repeated again after return from the signal
1647  * handler, and SSTEP can never finish in this case.
1648  */
1649 bool uprobe_deny_signal(void)
1650 {
1651 	struct task_struct *t = current;
1652 	struct uprobe_task *utask = t->utask;
1653 
1654 	if (likely(!utask || !utask->active_uprobe))
1655 		return false;
1656 
1657 	WARN_ON_ONCE(utask->state != UTASK_SSTEP);
1658 
1659 	if (signal_pending(t)) {
1660 		spin_lock_irq(&t->sighand->siglock);
1661 		clear_tsk_thread_flag(t, TIF_SIGPENDING);
1662 		spin_unlock_irq(&t->sighand->siglock);
1663 
1664 		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
1665 			utask->state = UTASK_SSTEP_TRAPPED;
1666 			set_tsk_thread_flag(t, TIF_UPROBE);
1667 		}
1668 	}
1669 
1670 	return true;
1671 }
1672 
1673 static void mmf_recalc_uprobes(struct mm_struct *mm)
1674 {
1675 	struct vm_area_struct *vma;
1676 
1677 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
1678 		if (!valid_vma(vma, false))
1679 			continue;
1680 		/*
1681 		 * This is not strictly accurate, we can race with
1682 		 * uprobe_unregister() and see the already removed
1683 		 * uprobe if delete_uprobe() was not yet called.
1684 		 * Or this uprobe can be filtered out.
1685 		 */
1686 		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
1687 			return;
1688 	}
1689 
1690 	clear_bit(MMF_HAS_UPROBES, &mm->flags);
1691 }
1692 
1693 static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
1694 {
1695 	struct page *page;
1696 	uprobe_opcode_t opcode;
1697 	int result;
1698 
1699 	pagefault_disable();
1700 	result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
1701 	pagefault_enable();
1702 
1703 	if (likely(result == 0))
1704 		goto out;
1705 
1706 	/*
1707 	 * The NULL 'tsk' here ensures that any faults that occur here
1708 	 * will not be accounted to the task.  'mm' *is* current->mm,
1709 	 * but we treat this as a 'remote' access since it is
1710 	 * essentially a kernel access to the memory.
1711 	 */
1712 	result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page,
1713 			NULL);
1714 	if (result < 0)
1715 		return result;
1716 
1717 	copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
1718 	put_page(page);
1719  out:
1720 	/* This needs to return true for any variant of the trap insn */
1721 	return is_trap_insn(&opcode);
1722 }
1723 
1724 static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
1725 {
1726 	struct mm_struct *mm = current->mm;
1727 	struct uprobe *uprobe = NULL;
1728 	struct vm_area_struct *vma;
1729 
1730 	down_read(&mm->mmap_sem);
1731 	vma = find_vma(mm, bp_vaddr);
1732 	if (vma && vma->vm_start <= bp_vaddr) {
1733 		if (valid_vma(vma, false)) {
1734 			struct inode *inode = file_inode(vma->vm_file);
1735 			loff_t offset = vaddr_to_offset(vma, bp_vaddr);
1736 
1737 			uprobe = find_uprobe(inode, offset);
1738 		}
1739 
1740 		if (!uprobe)
1741 			*is_swbp = is_trap_at_addr(mm, bp_vaddr);
1742 	} else {
1743 		*is_swbp = -EFAULT;
1744 	}
1745 
1746 	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
1747 		mmf_recalc_uprobes(mm);
1748 	up_read(&mm->mmap_sem);
1749 
1750 	return uprobe;
1751 }
1752 
1753 static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
1754 {
1755 	struct uprobe_consumer *uc;
1756 	int remove = UPROBE_HANDLER_REMOVE;
1757 	bool need_prep = false; /* prepare return uprobe, when needed */
1758 
1759 	down_read(&uprobe->register_rwsem);
1760 	for (uc = uprobe->consumers; uc; uc = uc->next) {
1761 		int rc = 0;
1762 
1763 		if (uc->handler) {
1764 			rc = uc->handler(uc, regs);
1765 			WARN(rc & ~UPROBE_HANDLER_MASK,
1766 				"bad rc=0x%x from %pf()\n", rc, uc->handler);
1767 		}
1768 
1769 		if (uc->ret_handler)
1770 			need_prep = true;
1771 
1772 		remove &= rc;
1773 	}
1774 
1775 	if (need_prep && !remove)
1776 		prepare_uretprobe(uprobe, regs); /* put bp at return */
1777 
1778 	if (remove && uprobe->consumers) {
1779 		WARN_ON(!uprobe_is_active(uprobe));
1780 		unapply_uprobe(uprobe, current->mm);
1781 	}
1782 	up_read(&uprobe->register_rwsem);
1783 }
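/*
 * Illustrative sketch only: a consumer that sets ->ret_handler makes
 * handler_chain() above arm the uretprobe trampoline via prepare_uretprobe();
 * the callback then fires from handle_uretprobe_chain() when the probed
 * function returns.  The name below is hypothetical.
 */
#if 0	/* example, not built */
static int example_ret_handler(struct uprobe_consumer *uc, unsigned long func,
			       struct pt_regs *regs)
{
	/* func is the probed function's entry address, saved as ri->func */
	return 0;
}
#endif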
1784 
1785 static void
1786 handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
1787 {
1788 	struct uprobe *uprobe = ri->uprobe;
1789 	struct uprobe_consumer *uc;
1790 
1791 	down_read(&uprobe->register_rwsem);
1792 	for (uc = uprobe->consumers; uc; uc = uc->next) {
1793 		if (uc->ret_handler)
1794 			uc->ret_handler(uc, ri->func, regs);
1795 	}
1796 	up_read(&uprobe->register_rwsem);
1797 }
1798 
1799 static struct return_instance *find_next_ret_chain(struct return_instance *ri)
1800 {
1801 	bool chained;
1802 
1803 	do {
1804 		chained = ri->chained;
1805 		ri = ri->next;	/* can't be NULL if chained */
1806 	} while (chained);
1807 
1808 	return ri;
1809 }
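/*
 * A "chained" return_instance (see prepare_uretprobe()) shares its
 * original return address with the entry that follows it in the list, so
 * the loop above skips the remainder of the current chain and returns the
 * first instance of the next one (or NULL at the end of the list).
 * handle_trampoline() below uses this to validate and report whole chains
 * at a time.
 */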
1810 
1811 static void handle_trampoline(struct pt_regs *regs)
1812 {
1813 	struct uprobe_task *utask;
1814 	struct return_instance *ri, *next;
1815 	bool valid;
1816 
1817 	utask = current->utask;
1818 	if (!utask)
1819 		goto sigill;
1820 
1821 	ri = utask->return_instances;
1822 	if (!ri)
1823 		goto sigill;
1824 
1825 	do {
1826 		/*
1827 		 * We should throw out the frames invalidated by longjmp().
1828 		 * If this chain is valid, then the next one should be alive
1829 		 * or NULL; the latter case means that nobody but ri->func
1830 		 * could hit this trampoline on return. TODO: sigaltstack().
1831 		 */
1832 		next = find_next_ret_chain(ri);
1833 		valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs);
1834 
1835 		instruction_pointer_set(regs, ri->orig_ret_vaddr);
1836 		do {
1837 			if (valid)
1838 				handle_uretprobe_chain(ri, regs);
1839 			ri = free_ret_instance(ri);
1840 			utask->depth--;
1841 		} while (ri != next);
1842 	} while (!valid);
1843 
1844 	utask->return_instances = ri;
1845 	return;
1846 
1847  sigill:
1848 	uprobe_warn(current, "handle uretprobe, sending SIGILL.");
1849 	force_sig_info(SIGILL, SEND_SIG_FORCED, current);
1850 
1851 }
1852 
1853 bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
1854 {
1855 	return false;
1856 }
1857 
1858 bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
1859 					struct pt_regs *regs)
1860 {
1861 	return true;
1862 }
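/*
 * The weak default above treats every frame as alive.  Architectures that
 * can do better (x86, for instance) override this and compare the stack
 * pointer recorded when the return address was hijacked against the
 * current one, so that frames abandoned by longjmp() can be detected and
 * discarded by handle_trampoline().
 */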
1863 
1864 /*
1865  * Run handler and ask thread to singlestep.
1866  * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
1867  */
1868 static void handle_swbp(struct pt_regs *regs)
1869 {
1870 	struct uprobe *uprobe;
1871 	unsigned long bp_vaddr;
1872 	int uninitialized_var(is_swbp);
1873 
1874 	bp_vaddr = uprobe_get_swbp_addr(regs);
1875 	if (bp_vaddr == get_trampoline_vaddr())
1876 		return handle_trampoline(regs);
1877 
1878 	uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
1879 	if (!uprobe) {
1880 		if (is_swbp > 0) {
1881 			/* No matching uprobe; signal SIGTRAP. */
1882 			send_sig(SIGTRAP, current, 0);
1883 		} else {
1884 			/*
1885 			 * Either we raced with uprobe_unregister() or we can't
1886 			 * access this memory. The latter is only possible if
1887 			 * another thread plays with our ->mm. In both cases
1888 			 * we can simply restart. If this vma was unmapped we
1889 			 * can pretend this insn was not executed yet and get
1890 			 * the (correct) SIGSEGV after restart.
1891 			 */
1892 			instruction_pointer_set(regs, bp_vaddr);
1893 		}
1894 		return;
1895 	}
1896 
1897 	/* change it in advance for ->handler() and restart */
1898 	instruction_pointer_set(regs, bp_vaddr);
1899 
1900 	/*
1901 	 * TODO: move copy_insn/etc into _register and remove this hack.
1902 	 * After we hit the bp, _unregister + _register can install the
1903 	 * new and not-yet-analyzed uprobe at the same address, restart.
1904 	 */
1905 	smp_rmb(); /* pairs with wmb() in install_breakpoint() */
1906 	if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
1907 		goto out;
1908 
1909 	/* Tracing handlers use ->utask to communicate with fetch methods */
1910 	if (!get_utask())
1911 		goto out;
1912 
1913 	if (arch_uprobe_ignore(&uprobe->arch, regs))
1914 		goto out;
1915 
1916 	handler_chain(uprobe, regs);
1917 
1918 	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
1919 		goto out;
1920 
1921 	if (!pre_ssout(uprobe, regs, bp_vaddr))
1922 		return;
1923 
1924 	/* arch_uprobe_skip_sstep() succeeded, or restart if we can't singlestep */
1925 out:
1926 	put_uprobe(uprobe);
1927 }
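/*
 * Reference handling above: find_active_uprobe() returns the uprobe with
 * a reference held (taken by find_uprobe()).  Every path that reaches the
 * "out" label drops that reference here, while a successful pre_ssout()
 * keeps it, parking the uprobe in utask->active_uprobe until
 * handle_singlestep() below releases it.
 */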
1928 
1929 /*
1930  * Perform required fix-ups and disable singlestep.
1931  * Allow pending signals to take effect.
1932  */
1933 static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
1934 {
1935 	struct uprobe *uprobe;
1936 	int err = 0;
1937 
1938 	uprobe = utask->active_uprobe;
1939 	if (utask->state == UTASK_SSTEP_ACK)
1940 		err = arch_uprobe_post_xol(&uprobe->arch, regs);
1941 	else if (utask->state == UTASK_SSTEP_TRAPPED)
1942 		arch_uprobe_abort_xol(&uprobe->arch, regs);
1943 	else
1944 		WARN_ON_ONCE(1);
1945 
1946 	put_uprobe(uprobe);
1947 	utask->active_uprobe = NULL;
1948 	utask->state = UTASK_RUNNING;
1949 	xol_free_insn_slot(current);
1950 
1951 	spin_lock_irq(&current->sighand->siglock);
1952 	recalc_sigpending(); /* see uprobe_deny_signal() */
1953 	spin_unlock_irq(&current->sighand->siglock);
1954 
1955 	if (unlikely(err)) {
1956 		uprobe_warn(current, "execute the probed insn, sending SIGILL.");
1957 		force_sig_info(SIGILL, SEND_SIG_FORCED, current);
1958 	}
1959 }
1960 
1961 /*
1962  * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
1963  * allows the thread to return from interrupt. After that handle_swbp()
1964  * sets utask->active_uprobe.
1965  *
1966  * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
1967  * and allows the thread to return from interrupt.
1968  *
1969  * While returning to userspace, thread notices the TIF_UPROBE flag and calls
1970  * uprobe_notify_resume().
1971  */
1972 void uprobe_notify_resume(struct pt_regs *regs)
1973 {
1974 	struct uprobe_task *utask;
1975 
1976 	clear_thread_flag(TIF_UPROBE);
1977 
1978 	utask = current->utask;
1979 	if (utask && utask->active_uprobe)
1980 		handle_singlestep(utask, regs);
1981 	else
1982 		handle_swbp(regs);
1983 }
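/*
 * Putting the pieces together, a single probe hit in the common case is:
 *
 *	breakpoint trap  -> uprobe_pre_sstep_notifier()   (sets TIF_UPROBE)
 *	return to user   -> uprobe_notify_resume() -> handle_swbp()
 *	                    -> handler_chain(), pre_ssout()
 *	single-step trap -> uprobe_post_sstep_notifier()  (sets TIF_UPROBE)
 *	return to user   -> uprobe_notify_resume() -> handle_singlestep()
 */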
1984 
1985 /*
1986  * uprobe_pre_sstep_notifier gets called from interrupt context as part of
1987  * notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
1988  */
1989 int uprobe_pre_sstep_notifier(struct pt_regs *regs)
1990 {
1991 	if (!current->mm)
1992 		return 0;
1993 
1994 	if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
1995 	    (!current->utask || !current->utask->return_instances))
1996 		return 0;
1997 
1998 	set_thread_flag(TIF_UPROBE);
1999 	return 1;
2000 }
2001 
2002 /*
2003  * uprobe_post_sstep_notifier gets called in interrupt context as part of notifier
2004  * mechanism. Set TIF_UPROBE flag and indicate completion of singlestep.
2005  */
2006 int uprobe_post_sstep_notifier(struct pt_regs *regs)
2007 {
2008 	struct uprobe_task *utask = current->utask;
2009 
2010 	if (!current->mm || !utask || !utask->active_uprobe)
2011 		/* task is currently not uprobed */
2012 		return 0;
2013 
2014 	utask->state = UTASK_SSTEP_ACK;
2015 	set_thread_flag(TIF_UPROBE);
2016 	return 1;
2017 }
2018 
2019 static struct notifier_block uprobe_exception_nb = {
2020 	.notifier_call		= arch_uprobe_exception_notify,
2021 	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
2022 };
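/*
 * arch_uprobe_exception_notify() is provided by the architecture; on x86,
 * for instance, it routes DIE_INT3 to uprobe_pre_sstep_notifier() and
 * DIE_DEBUG to uprobe_post_sstep_notifier(), connecting the die-notifier
 * chain registered below to the two entry points above.
 */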
2023 
2024 static int __init init_uprobes(void)
2025 {
2026 	int i;
2027 
2028 	for (i = 0; i < UPROBES_HASH_SZ; i++)
2029 		mutex_init(&uprobes_mmap_mutex[i]);
2030 
2031 	if (percpu_init_rwsem(&dup_mmap_sem))
2032 		return -ENOMEM;
2033 
2034 	return register_die_notifier(&uprobe_exception_nb);
2035 }
2036 __initcall(init_uprobes);
2037