1 /*
2 * User-space Probes (UProbes)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) IBM Corporation, 2008-2012
19 * Authors:
20 * Srikar Dronamraju
21 * Jim Keniston
22 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
23 */
24
25 #include <linux/kernel.h>
26 #include <linux/highmem.h>
27 #include <linux/pagemap.h> /* read_mapping_page */
28 #include <linux/slab.h>
29 #include <linux/sched.h>
30 #include <linux/export.h>
31 #include <linux/rmap.h> /* anon_vma_prepare */
32 #include <linux/mmu_notifier.h> /* set_pte_at_notify */
33 #include <linux/swap.h> /* try_to_free_swap */
34 #include <linux/ptrace.h> /* user_enable_single_step */
35 #include <linux/kdebug.h> /* notifier mechanism */
36 #include "../../mm/internal.h" /* munlock_vma_page */
37 #include <linux/percpu-rwsem.h>
38 #include <linux/task_work.h>
39 #include <linux/shmem_fs.h>
40
41 #include <linux/uprobes.h>
42
43 #define UINSNS_PER_PAGE (PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
44 #define MAX_UPROBE_XOL_SLOTS UINSNS_PER_PAGE
45
46 static struct rb_root uprobes_tree = RB_ROOT;
47 /*
48  * Allows us to skip uprobe_mmap() if there are no uprobe events active
49  * at this time. A fine-grained per-inode count would probably be better.
50 */
51 #define no_uprobe_events() RB_EMPTY_ROOT(&uprobes_tree)
52
53 static DEFINE_SPINLOCK(uprobes_treelock); /* serialize rbtree access */
54
55 #define UPROBES_HASH_SZ 13
56 /* serialize uprobe->pending_list */
57 static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
58 #define uprobes_mmap_hash(v) (&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
59
60 static struct percpu_rw_semaphore dup_mmap_sem;
61
62 /* Have a copy of original instruction */
63 #define UPROBE_COPY_INSN 0
64
65 struct uprobe {
66 struct rb_node rb_node; /* node in the rb tree */
67 atomic_t ref;
68 struct rw_semaphore register_rwsem;
69 struct rw_semaphore consumer_rwsem;
70 struct list_head pending_list;
71 struct uprobe_consumer *consumers;
72 struct inode *inode; /* Also hold a ref to inode */
73 loff_t offset;
74 unsigned long flags;
75
76 /*
77 * The generic code assumes that it has two members of unknown type
78 * owned by the arch-specific code:
79 *
80 * insn - copy_insn() saves the original instruction here for
81 * arch_uprobe_analyze_insn().
82 *
83 * ixol - potentially modified instruction to execute out of
84 * line, copied to xol_area by xol_get_insn_slot().
85 */
86 struct arch_uprobe arch;
87 };
88
89 struct return_instance {
90 struct uprobe *uprobe;
91 unsigned long func;
92 unsigned long orig_ret_vaddr; /* original return address */
93 bool chained; /* true, if instance is nested */
94
95 struct return_instance *next; /* keep as stack */
96 };
97
98 /*
99 * Execute out of line area: anonymous executable mapping installed
100 * by the probed task to execute the copy of the original instruction
101 * mangled by set_swbp().
102 *
103  * On a breakpoint hit, the thread contends for a slot. It frees the
104 * slot after singlestep. Currently a fixed number of slots are
105 * allocated.
106 */
107 struct xol_area {
108 wait_queue_head_t wq; /* if all slots are busy */
109 atomic_t slot_count; /* number of in-use slots */
110 unsigned long *bitmap; /* 0 = free slot */
111 struct page *page;
112
113 /*
114 * We keep the vma's vm_start rather than a pointer to the vma
115 * itself. The probed process or a naughty kernel module could make
116 * the vma go away, and we must handle that reasonably gracefully.
117 */
118 unsigned long vaddr; /* Page(s) of instruction slots */
119 };
120
121 /*
122 * valid_vma: Verify if the specified vma is an executable vma
123 * Relax restrictions while unregistering: vm_flags might have
124 * changed after breakpoint was inserted.
125 * - is_register: indicates if we are in register context.
126  * - Return true if the specified vma is an executable,
127  *   file-backed vma.
128 */
129 static bool valid_vma(struct vm_area_struct *vma, bool is_register)
130 {
131 vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;
132
133 if (is_register)
134 flags |= VM_WRITE;
135
136 return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
137 }
138
139 static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
140 {
141 return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
142 }
143
144 static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
145 {
146 return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
147 }
148
149 /**
150 * __replace_page - replace page in vma by new page.
151 * based on replace_page in mm/ksm.c
152 *
153 * @vma: vma that holds the pte pointing to page
154 * @addr: address the old @page is mapped at
155 * @page: the cowed page we are replacing by kpage
156 * @kpage: the modified page we replace page by
157 *
158 * Returns 0 on success, -EFAULT on failure.
159 */
160 static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
161 struct page *page, struct page *kpage)
162 {
163 struct mm_struct *mm = vma->vm_mm;
164 spinlock_t *ptl;
165 pte_t *ptep;
166 int err;
167 /* For mmu_notifiers */
168 const unsigned long mmun_start = addr;
169 const unsigned long mmun_end = addr + PAGE_SIZE;
170 struct mem_cgroup *memcg;
171
172 err = mem_cgroup_try_charge(kpage, vma->vm_mm, GFP_KERNEL, &memcg);
173 if (err)
174 return err;
175
176 /* For try_to_free_swap() and munlock_vma_page() below */
177 lock_page(page);
178
179 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
180 err = -EAGAIN;
181 ptep = page_check_address(page, mm, addr, &ptl, 0);
182 if (!ptep) {
183 mem_cgroup_cancel_charge(kpage, memcg);
184 goto unlock;
185 }
186
187 get_page(kpage);
188 page_add_new_anon_rmap(kpage, vma, addr);
189 mem_cgroup_commit_charge(kpage, memcg, false);
190 lru_cache_add_active_or_unevictable(kpage, vma);
191
192 if (!PageAnon(page)) {
193 dec_mm_counter(mm, MM_FILEPAGES);
194 inc_mm_counter(mm, MM_ANONPAGES);
195 }
196
197 flush_cache_page(vma, addr, pte_pfn(*ptep));
198 ptep_clear_flush(vma, addr, ptep);
199 set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
200
201 page_remove_rmap(page);
202 if (!page_mapped(page))
203 try_to_free_swap(page);
204 pte_unmap_unlock(ptep, ptl);
205
206 if (vma->vm_flags & VM_LOCKED)
207 munlock_vma_page(page);
208 put_page(page);
209
210 err = 0;
211 unlock:
212 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
213 unlock_page(page);
214 return err;
215 }
216
217 /**
218 * is_swbp_insn - check if instruction is breakpoint instruction.
219 * @insn: instruction to be checked.
220 * Default implementation of is_swbp_insn
221 * Returns true if @insn is a breakpoint instruction.
222 */
223 bool __weak is_swbp_insn(uprobe_opcode_t *insn)
224 {
225 return *insn == UPROBE_SWBP_INSN;
226 }
227
228 /**
229 * is_trap_insn - check if instruction is breakpoint instruction.
230 * @insn: instruction to be checked.
231 * Default implementation of is_trap_insn
232 * Returns true if @insn is a breakpoint instruction.
233 *
234 * This function is needed for the case where an architecture has multiple
235 * trap instructions (like powerpc).
236 */
237 bool __weak is_trap_insn(uprobe_opcode_t *insn)
238 {
239 return is_swbp_insn(insn);
240 }
241
242 static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
243 {
244 void *kaddr = kmap_atomic(page);
245 memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
246 kunmap_atomic(kaddr);
247 }
248
249 static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
250 {
251 void *kaddr = kmap_atomic(page);
252 memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
253 kunmap_atomic(kaddr);
254 }
255
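/*
 * verify_opcode - check whether the opcode at @vaddr still needs changing.
 * Returns 1 if the caller should go ahead and write @new_opcode, or 0 if
 * the page already holds the desired state (nothing to do).
 */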
256 static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
257 {
258 uprobe_opcode_t old_opcode;
259 bool is_swbp;
260
261 /*
262 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
263 * We do not check if it is any other 'trap variant' which could
264 * be conditional trap instruction such as the one powerpc supports.
265 *
266 * The logic is that we do not care if the underlying instruction
267  * is a trap variant; uprobes always win over any other (gdb)
268 * breakpoint.
269 */
270 copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
271 is_swbp = is_swbp_insn(&old_opcode);
272
273 if (is_swbp_insn(new_opcode)) {
274 if (is_swbp) /* register: already installed? */
275 return 0;
276 } else {
277 if (!is_swbp) /* unregister: was it changed by us? */
278 return 0;
279 }
280
281 return 1;
282 }
283
284 /*
285 * NOTE:
286 * Expect the breakpoint instruction to be the smallest size instruction for
287  * the architecture. If an arch has variable-length instructions and the
288  * breakpoint instruction is not the smallest-length instruction
289  * supported by that architecture, then we need to modify is_trap_at_addr and
290  * uprobe_write_opcode accordingly. This would never be a problem for archs
291  * that have fixed-length instructions.
292 *
293 * uprobe_write_opcode - write the opcode at a given virtual address.
294 * @mm: the probed process address space.
295 * @vaddr: the virtual address to store the opcode.
296 * @opcode: opcode to be written at @vaddr.
297 *
298 * Called with mm->mmap_sem held for write.
299 * Return 0 (success) or a negative errno.
300 */
301 int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
302 uprobe_opcode_t opcode)
303 {
304 struct page *old_page, *new_page;
305 struct vm_area_struct *vma;
306 int ret;
307
308 retry:
309 /* Read the page with vaddr into memory */
310 ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma);
311 if (ret <= 0)
312 return ret;
313
314 ret = verify_opcode(old_page, vaddr, &opcode);
315 if (ret <= 0)
316 goto put_old;
317
318 ret = anon_vma_prepare(vma);
319 if (ret)
320 goto put_old;
321
322 ret = -ENOMEM;
323 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
324 if (!new_page)
325 goto put_old;
326
327 __SetPageUptodate(new_page);
328 copy_highpage(new_page, old_page);
329 copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
330
331 ret = __replace_page(vma, vaddr, old_page, new_page);
332 page_cache_release(new_page);
333 put_old:
334 put_page(old_page);
335
336 if (unlikely(ret == -EAGAIN))
337 goto retry;
338 return ret;
339 }
340
341 /**
342 * set_swbp - store breakpoint at a given address.
343 * @auprobe: arch specific probepoint information.
344 * @mm: the probed process address space.
345 * @vaddr: the virtual address to insert the opcode.
346 *
347 * For mm @mm, store the breakpoint instruction at @vaddr.
348 * Return 0 (success) or a negative errno.
349 */
350 int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
351 {
352 return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
353 }
354
355 /**
356 * set_orig_insn - Restore the original instruction.
357 * @mm: the probed process address space.
358 * @auprobe: arch specific probepoint information.
359 * @vaddr: the virtual address to insert the opcode.
360 *
361 * For mm @mm, restore the original opcode (opcode) at @vaddr.
362 * Return 0 (success) or a negative errno.
363 */
364 int __weak
365 set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
366 {
367 return uprobe_write_opcode(mm, vaddr, *(uprobe_opcode_t *)&auprobe->insn);
368 }
369
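/*
 * Comparator for the uprobes rbtree: orders uprobes by inode first, then
 * by offset. Returns -1, 0 or 1 like a classic comparison function.
 */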
370 static int match_uprobe(struct uprobe *l, struct uprobe *r)
371 {
372 if (l->inode < r->inode)
373 return -1;
374
375 if (l->inode > r->inode)
376 return 1;
377
378 if (l->offset < r->offset)
379 return -1;
380
381 if (l->offset > r->offset)
382 return 1;
383
384 return 0;
385 }
386
387 static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
388 {
389 struct uprobe u = { .inode = inode, .offset = offset };
390 struct rb_node *n = uprobes_tree.rb_node;
391 struct uprobe *uprobe;
392 int match;
393
394 while (n) {
395 uprobe = rb_entry(n, struct uprobe, rb_node);
396 match = match_uprobe(&u, uprobe);
397 if (!match) {
398 atomic_inc(&uprobe->ref);
399 return uprobe;
400 }
401
402 if (match < 0)
403 n = n->rb_left;
404 else
405 n = n->rb_right;
406 }
407 return NULL;
408 }
409
410 /*
411 * Find a uprobe corresponding to a given inode:offset
412 * Acquires uprobes_treelock
413 */
414 static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
415 {
416 struct uprobe *uprobe;
417
418 spin_lock(&uprobes_treelock);
419 uprobe = __find_uprobe(inode, offset);
420 spin_unlock(&uprobes_treelock);
421
422 return uprobe;
423 }
424
425 static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
426 {
427 struct rb_node **p = &uprobes_tree.rb_node;
428 struct rb_node *parent = NULL;
429 struct uprobe *u;
430 int match;
431
432 while (*p) {
433 parent = *p;
434 u = rb_entry(parent, struct uprobe, rb_node);
435 match = match_uprobe(uprobe, u);
436 if (!match) {
437 atomic_inc(&u->ref);
438 return u;
439 }
440
441 if (match < 0)
442 p = &parent->rb_left;
443 else
444 p = &parent->rb_right;
445
446 }
447
448 u = NULL;
449 rb_link_node(&uprobe->rb_node, parent, p);
450 rb_insert_color(&uprobe->rb_node, &uprobes_tree);
451 /* get access + creation ref */
452 atomic_set(&uprobe->ref, 2);
453
454 return u;
455 }
456
457 /*
458 * Acquire uprobes_treelock.
459 * Matching uprobe already exists in rbtree;
460 * increment (access refcount) and return the matching uprobe.
461 *
462 * No matching uprobe; insert the uprobe in rb_tree;
463 * get a double refcount (access + creation) and return NULL.
464 */
465 static struct uprobe *insert_uprobe(struct uprobe *uprobe)
466 {
467 struct uprobe *u;
468
469 spin_lock(&uprobes_treelock);
470 u = __insert_uprobe(uprobe);
471 spin_unlock(&uprobes_treelock);
472
473 return u;
474 }
475
476 static void put_uprobe(struct uprobe *uprobe)
477 {
478 if (atomic_dec_and_test(&uprobe->ref))
479 kfree(uprobe);
480 }
481
482 static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
483 {
484 struct uprobe *uprobe, *cur_uprobe;
485
486 uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
487 if (!uprobe)
488 return NULL;
489
490 uprobe->inode = igrab(inode);
491 uprobe->offset = offset;
492 init_rwsem(&uprobe->register_rwsem);
493 init_rwsem(&uprobe->consumer_rwsem);
494
495 /* add to uprobes_tree, sorted on inode:offset */
496 cur_uprobe = insert_uprobe(uprobe);
497 /* a uprobe exists for this inode:offset combination */
498 if (cur_uprobe) {
499 kfree(uprobe);
500 uprobe = cur_uprobe;
501 iput(inode);
502 }
503
504 return uprobe;
505 }
506
507 static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
508 {
509 down_write(&uprobe->consumer_rwsem);
510 uc->next = uprobe->consumers;
511 uprobe->consumers = uc;
512 up_write(&uprobe->consumer_rwsem);
513 }
514
515 /*
516 * For uprobe @uprobe, delete the consumer @uc.
517 * Return true if the @uc is deleted successfully
518 * or return false.
519 */
520 static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
521 {
522 struct uprobe_consumer **con;
523 bool ret = false;
524
525 down_write(&uprobe->consumer_rwsem);
526 for (con = &uprobe->consumers; *con; con = &(*con)->next) {
527 if (*con == uc) {
528 *con = uc->next;
529 ret = true;
530 break;
531 }
532 }
533 up_write(&uprobe->consumer_rwsem);
534
535 return ret;
536 }
537
538 static int __copy_insn(struct address_space *mapping, struct file *filp,
539 void *insn, int nbytes, loff_t offset)
540 {
541 struct page *page;
542 /*
543 * Ensure that the page that has the original instruction is populated
544 * and in page-cache. If ->readpage == NULL it must be shmem_mapping(),
545 * see uprobe_register().
546 */
547 if (mapping->a_ops->readpage)
548 page = read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT, filp);
549 else
550 page = shmem_read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT);
551 if (IS_ERR(page))
552 return PTR_ERR(page);
553
554 copy_from_page(page, offset, insn, nbytes);
555 page_cache_release(page);
556
557 return 0;
558 }
559
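/*
 * Copy up to sizeof(uprobe->arch.insn) bytes of the original instruction
 * from the backing file; the loop handles the case where the copy crosses
 * a page boundary.
 */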
560 static int copy_insn(struct uprobe *uprobe, struct file *filp)
561 {
562 struct address_space *mapping = uprobe->inode->i_mapping;
563 loff_t offs = uprobe->offset;
564 void *insn = &uprobe->arch.insn;
565 int size = sizeof(uprobe->arch.insn);
566 int len, err = -EIO;
567
568 /* Copy only available bytes, -EIO if nothing was read */
569 do {
570 if (offs >= i_size_read(uprobe->inode))
571 break;
572
573 len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
574 err = __copy_insn(mapping, filp, insn, len, offs);
575 if (err)
576 break;
577
578 insn += len;
579 offs += len;
580 size -= len;
581 } while (size);
582
583 return err;
584 }
585
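/*
 * Copy the original instruction and let the arch code analyze it. This is
 * done only once per uprobe; UPROBE_COPY_INSN records completion so later
 * breakpoint installs can skip this step.
 */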
586 static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
587 struct mm_struct *mm, unsigned long vaddr)
588 {
589 int ret = 0;
590
591 if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
592 return ret;
593
594 /* TODO: move this into _register, until then we abuse this sem. */
595 down_write(&uprobe->consumer_rwsem);
596 if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
597 goto out;
598
599 ret = copy_insn(uprobe, file);
600 if (ret)
601 goto out;
602
603 ret = -ENOTSUPP;
604 if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
605 goto out;
606
607 ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
608 if (ret)
609 goto out;
610
611 /* uprobe_write_opcode() assumes we don't cross page boundary */
612 BUG_ON((uprobe->offset & ~PAGE_MASK) +
613 UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
614
615 smp_wmb(); /* pairs with rmb() in find_active_uprobe() */
616 set_bit(UPROBE_COPY_INSN, &uprobe->flags);
617
618 out:
619 up_write(&uprobe->consumer_rwsem);
620
621 return ret;
622 }
623
624 static inline bool consumer_filter(struct uprobe_consumer *uc,
625 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
626 {
627 return !uc->filter || uc->filter(uc, ctx, mm);
628 }
629
630 static bool filter_chain(struct uprobe *uprobe,
631 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
632 {
633 struct uprobe_consumer *uc;
634 bool ret = false;
635
636 down_read(&uprobe->consumer_rwsem);
637 for (uc = uprobe->consumers; uc; uc = uc->next) {
638 ret = consumer_filter(uc, ctx, mm);
639 if (ret)
640 break;
641 }
642 up_read(&uprobe->consumer_rwsem);
643
644 return ret;
645 }
646
647 static int
648 install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
649 struct vm_area_struct *vma, unsigned long vaddr)
650 {
651 bool first_uprobe;
652 int ret;
653
654 ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
655 if (ret)
656 return ret;
657
658 /*
659 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
660 * the task can hit this breakpoint right after __replace_page().
661 */
662 first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
663 if (first_uprobe)
664 set_bit(MMF_HAS_UPROBES, &mm->flags);
665
666 ret = set_swbp(&uprobe->arch, mm, vaddr);
667 if (!ret)
668 clear_bit(MMF_RECALC_UPROBES, &mm->flags);
669 else if (first_uprobe)
670 clear_bit(MMF_HAS_UPROBES, &mm->flags);
671
672 return ret;
673 }
674
675 static int
676 remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
677 {
678 set_bit(MMF_RECALC_UPROBES, &mm->flags);
679 return set_orig_insn(&uprobe->arch, mm, vaddr);
680 }
681
682 static inline bool uprobe_is_active(struct uprobe *uprobe)
683 {
684 return !RB_EMPTY_NODE(&uprobe->rb_node);
685 }
686 /*
687 * There could be threads that have already hit the breakpoint. They
688 * will recheck the current insn and restart if find_uprobe() fails.
689 * See find_active_uprobe().
690 */
691 static void delete_uprobe(struct uprobe *uprobe)
692 {
693 if (WARN_ON(!uprobe_is_active(uprobe)))
694 return;
695
696 spin_lock(&uprobes_treelock);
697 rb_erase(&uprobe->rb_node, &uprobes_tree);
698 spin_unlock(&uprobes_treelock);
699 RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
700 iput(uprobe->inode);
701 put_uprobe(uprobe);
702 }
703
704 struct map_info {
705 struct map_info *next;
706 struct mm_struct *mm;
707 unsigned long vaddr;
708 };
709
710 static inline struct map_info *free_map_info(struct map_info *info)
711 {
712 struct map_info *next = info->next;
713 kfree(info);
714 return next;
715 }
716
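/*
 * Build the list of mm/vaddr pairs that currently map @offset of @mapping.
 * Allocation is done with GFP_NOWAIT while holding i_mmap_mutex; if that
 * fails, the lock is dropped, the missing entries are allocated with
 * GFP_KERNEL and the whole walk is retried.
 */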
717 static struct map_info *
718 build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
719 {
720 unsigned long pgoff = offset >> PAGE_SHIFT;
721 struct vm_area_struct *vma;
722 struct map_info *curr = NULL;
723 struct map_info *prev = NULL;
724 struct map_info *info;
725 int more = 0;
726
727 again:
728 mutex_lock(&mapping->i_mmap_mutex);
729 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
730 if (!valid_vma(vma, is_register))
731 continue;
732
733 if (!prev && !more) {
734 /*
735 * Needs GFP_NOWAIT to avoid i_mmap_mutex recursion through
736 * reclaim. This is optimistic, no harm done if it fails.
737 */
738 prev = kmalloc(sizeof(struct map_info),
739 GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
740 if (prev)
741 prev->next = NULL;
742 }
743 if (!prev) {
744 more++;
745 continue;
746 }
747
748 if (!atomic_inc_not_zero(&vma->vm_mm->mm_users))
749 continue;
750
751 info = prev;
752 prev = prev->next;
753 info->next = curr;
754 curr = info;
755
756 info->mm = vma->vm_mm;
757 info->vaddr = offset_to_vaddr(vma, offset);
758 }
759 mutex_unlock(&mapping->i_mmap_mutex);
760
761 if (!more)
762 goto out;
763
764 prev = curr;
765 while (curr) {
766 mmput(curr->mm);
767 curr = curr->next;
768 }
769
770 do {
771 info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
772 if (!info) {
773 curr = ERR_PTR(-ENOMEM);
774 goto out;
775 }
776 info->next = prev;
777 prev = info;
778 } while (--more);
779
780 goto again;
781 out:
782 while (prev)
783 prev = free_map_info(prev);
784 return curr;
785 }
786
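/*
 * For every mm that maps uprobe->inode at uprobe->offset, install the
 * breakpoint (when @new is a freshly added consumer) or remove it (when
 * @new is NULL and no remaining consumer wants it), consulting the
 * consumer filters either way.
 */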
787 static int
788 register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
789 {
790 bool is_register = !!new;
791 struct map_info *info;
792 int err = 0;
793
794 percpu_down_write(&dup_mmap_sem);
795 info = build_map_info(uprobe->inode->i_mapping,
796 uprobe->offset, is_register);
797 if (IS_ERR(info)) {
798 err = PTR_ERR(info);
799 goto out;
800 }
801
802 while (info) {
803 struct mm_struct *mm = info->mm;
804 struct vm_area_struct *vma;
805
806 if (err && is_register)
807 goto free;
808
809 down_write(&mm->mmap_sem);
810 vma = find_vma(mm, info->vaddr);
811 if (!vma || !valid_vma(vma, is_register) ||
812 file_inode(vma->vm_file) != uprobe->inode)
813 goto unlock;
814
815 if (vma->vm_start > info->vaddr ||
816 vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
817 goto unlock;
818
819 if (is_register) {
820 /* consult only the "caller", new consumer. */
821 if (consumer_filter(new,
822 UPROBE_FILTER_REGISTER, mm))
823 err = install_breakpoint(uprobe, mm, vma, info->vaddr);
824 } else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
825 if (!filter_chain(uprobe,
826 UPROBE_FILTER_UNREGISTER, mm))
827 err |= remove_breakpoint(uprobe, mm, info->vaddr);
828 }
829
830 unlock:
831 up_write(&mm->mmap_sem);
832 free:
833 mmput(mm);
834 info = free_map_info(info);
835 }
836 out:
837 percpu_up_write(&dup_mmap_sem);
838 return err;
839 }
840
841 static int __uprobe_register(struct uprobe *uprobe, struct uprobe_consumer *uc)
842 {
843 consumer_add(uprobe, uc);
844 return register_for_each_vma(uprobe, uc);
845 }
846
847 static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
848 {
849 int err;
850
851 if (WARN_ON(!consumer_del(uprobe, uc)))
852 return;
853
854 err = register_for_each_vma(uprobe, NULL);
855 	/* TODO : can't unregister? schedule a worker thread */
856 if (!uprobe->consumers && !err)
857 delete_uprobe(uprobe);
858 }
859
860 /*
861 * uprobe_register - register a probe
862 * @inode: the file in which the probe has to be placed.
863 * @offset: offset from the start of the file.
864  * @uc: information on how to handle the probe.
865 *
866 * Apart from the access refcount, uprobe_register() takes a creation
867  * refcount (through alloc_uprobe) if and only if this @uprobe is getting
868  * inserted into the rbtree (i.e. first consumer for a @inode:@offset
869 * tuple). Creation refcount stops uprobe_unregister from freeing the
870 * @uprobe even before the register operation is complete. Creation
871 * refcount is released when the last @uc for the @uprobe
872 * unregisters.
873 *
874  * Return errno if it cannot successfully install probes
875 * else return 0 (success)
876 */
877 int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
878 {
879 struct uprobe *uprobe;
880 int ret;
881
882 /* Uprobe must have at least one set consumer */
883 if (!uc->handler && !uc->ret_handler)
884 return -EINVAL;
885
886 /* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
887 if (!inode->i_mapping->a_ops->readpage && !shmem_mapping(inode->i_mapping))
888 return -EIO;
889 /* Racy, just to catch the obvious mistakes */
890 if (offset > i_size_read(inode))
891 return -EINVAL;
892
893 retry:
894 uprobe = alloc_uprobe(inode, offset);
895 if (!uprobe)
896 return -ENOMEM;
897 /*
898 * We can race with uprobe_unregister()->delete_uprobe().
899 * Check uprobe_is_active() and retry if it is false.
900 */
901 down_write(&uprobe->register_rwsem);
902 ret = -EAGAIN;
903 if (likely(uprobe_is_active(uprobe))) {
904 ret = __uprobe_register(uprobe, uc);
905 if (ret)
906 __uprobe_unregister(uprobe, uc);
907 }
908 up_write(&uprobe->register_rwsem);
909 put_uprobe(uprobe);
910
911 if (unlikely(ret == -EAGAIN))
912 goto retry;
913 return ret;
914 }
915 EXPORT_SYMBOL_GPL(uprobe_register);
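/*
 * Illustrative only: a minimal, hypothetical consumer (the names below are
 * made up and not part of this file) showing the intended calling sequence:
 *
 *	static int my_handler(struct uprobe_consumer *self, struct pt_regs *regs)
 *	{
 *		return 0;	(0 keeps the probe, UPROBE_HANDLER_REMOVE drops it)
 *	}
 *
 *	static struct uprobe_consumer my_consumer = { .handler = my_handler };
 *
 *	err = uprobe_register(inode, offset, &my_consumer);
 *	...
 *	uprobe_unregister(inode, offset, &my_consumer);
 */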
916
917 /*
918  * uprobe_apply - add or remove the breakpoints of an already registered probe.
919  * @inode: the file in which the probe resides.
920 * @offset: offset from the start of the file.
921 * @uc: consumer which wants to add more or remove some breakpoints
922 * @add: add or remove the breakpoints
923 */
924 int uprobe_apply(struct inode *inode, loff_t offset,
925 struct uprobe_consumer *uc, bool add)
926 {
927 struct uprobe *uprobe;
928 struct uprobe_consumer *con;
929 int ret = -ENOENT;
930
931 uprobe = find_uprobe(inode, offset);
932 if (WARN_ON(!uprobe))
933 return ret;
934
935 down_write(&uprobe->register_rwsem);
936 for (con = uprobe->consumers; con && con != uc ; con = con->next)
937 ;
938 if (con)
939 ret = register_for_each_vma(uprobe, add ? uc : NULL);
940 up_write(&uprobe->register_rwsem);
941 put_uprobe(uprobe);
942
943 return ret;
944 }
945
946 /*
947  * uprobe_unregister - unregister an already registered probe.
948 * @inode: the file in which the probe has to be removed.
949 * @offset: offset from the start of the file.
950 * @uc: identify which probe if multiple probes are colocated.
951 */
952 void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
953 {
954 struct uprobe *uprobe;
955
956 uprobe = find_uprobe(inode, offset);
957 if (WARN_ON(!uprobe))
958 return;
959
960 down_write(&uprobe->register_rwsem);
961 __uprobe_unregister(uprobe, uc);
962 up_write(&uprobe->register_rwsem);
963 put_uprobe(uprobe);
964 }
965 EXPORT_SYMBOL_GPL(uprobe_unregister);
966
967 static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
968 {
969 struct vm_area_struct *vma;
970 int err = 0;
971
972 down_read(&mm->mmap_sem);
973 for (vma = mm->mmap; vma; vma = vma->vm_next) {
974 unsigned long vaddr;
975 loff_t offset;
976
977 if (!valid_vma(vma, false) ||
978 file_inode(vma->vm_file) != uprobe->inode)
979 continue;
980
981 offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
982 if (uprobe->offset < offset ||
983 uprobe->offset >= offset + vma->vm_end - vma->vm_start)
984 continue;
985
986 vaddr = offset_to_vaddr(vma, uprobe->offset);
987 err |= remove_breakpoint(uprobe, mm, vaddr);
988 }
989 up_read(&mm->mmap_sem);
990
991 return err;
992 }
993
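/*
 * Find any rbtree node for @inode whose offset lies in [@min, @max].
 * Caller must hold uprobes_treelock.
 */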
994 static struct rb_node *
995 find_node_in_range(struct inode *inode, loff_t min, loff_t max)
996 {
997 struct rb_node *n = uprobes_tree.rb_node;
998
999 while (n) {
1000 struct uprobe *u = rb_entry(n, struct uprobe, rb_node);
1001
1002 if (inode < u->inode) {
1003 n = n->rb_left;
1004 } else if (inode > u->inode) {
1005 n = n->rb_right;
1006 } else {
1007 if (max < u->offset)
1008 n = n->rb_left;
1009 else if (min > u->offset)
1010 n = n->rb_right;
1011 else
1012 break;
1013 }
1014 }
1015
1016 return n;
1017 }
1018
1019 /*
1020 * For a given range in vma, build a list of probes that need to be inserted.
1021 */
1022 static void build_probe_list(struct inode *inode,
1023 struct vm_area_struct *vma,
1024 unsigned long start, unsigned long end,
1025 struct list_head *head)
1026 {
1027 loff_t min, max;
1028 struct rb_node *n, *t;
1029 struct uprobe *u;
1030
1031 INIT_LIST_HEAD(head);
1032 min = vaddr_to_offset(vma, start);
1033 max = min + (end - start) - 1;
1034
1035 spin_lock(&uprobes_treelock);
1036 n = find_node_in_range(inode, min, max);
1037 if (n) {
1038 for (t = n; t; t = rb_prev(t)) {
1039 u = rb_entry(t, struct uprobe, rb_node);
1040 if (u->inode != inode || u->offset < min)
1041 break;
1042 list_add(&u->pending_list, head);
1043 atomic_inc(&u->ref);
1044 }
1045 for (t = n; (t = rb_next(t)); ) {
1046 u = rb_entry(t, struct uprobe, rb_node);
1047 if (u->inode != inode || u->offset > max)
1048 break;
1049 list_add(&u->pending_list, head);
1050 atomic_inc(&u->ref);
1051 }
1052 }
1053 spin_unlock(&uprobes_treelock);
1054 }
1055
1056 /*
1057 * Called from mmap_region/vma_adjust with mm->mmap_sem acquired.
1058 *
1059 * Currently we ignore all errors and always return 0, the callers
1060 * can't handle the failure anyway.
1061 */
1062 int uprobe_mmap(struct vm_area_struct *vma)
1063 {
1064 struct list_head tmp_list;
1065 struct uprobe *uprobe, *u;
1066 struct inode *inode;
1067
1068 if (no_uprobe_events() || !valid_vma(vma, true))
1069 return 0;
1070
1071 inode = file_inode(vma->vm_file);
1072 if (!inode)
1073 return 0;
1074
1075 mutex_lock(uprobes_mmap_hash(inode));
1076 build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
1077 /*
1078 * We can race with uprobe_unregister(), this uprobe can be already
1079 * removed. But in this case filter_chain() must return false, all
1080 * consumers have gone away.
1081 */
1082 list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
1083 if (!fatal_signal_pending(current) &&
1084 filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
1085 unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
1086 install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
1087 }
1088 put_uprobe(uprobe);
1089 }
1090 mutex_unlock(uprobes_mmap_hash(inode));
1091
1092 return 0;
1093 }
1094
1095 static bool
1096 vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1097 {
1098 loff_t min, max;
1099 struct inode *inode;
1100 struct rb_node *n;
1101
1102 inode = file_inode(vma->vm_file);
1103
1104 min = vaddr_to_offset(vma, start);
1105 max = min + (end - start) - 1;
1106
1107 spin_lock(&uprobes_treelock);
1108 n = find_node_in_range(inode, min, max);
1109 spin_unlock(&uprobes_treelock);
1110
1111 return !!n;
1112 }
1113
1114 /*
1115  * Called in the context of a munmap of a vma.
1116 */
1117 void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1118 {
1119 if (no_uprobe_events() || !valid_vma(vma, false))
1120 return;
1121
1122 if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
1123 return;
1124
1125 if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
1126 test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
1127 return;
1128
1129 if (vma_has_uprobes(vma, start, end))
1130 set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
1131 }
1132
1133 /* Slot allocation for XOL */
1134 static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
1135 {
1136 int ret = -EALREADY;
1137
1138 down_write(&mm->mmap_sem);
1139 if (mm->uprobes_state.xol_area)
1140 goto fail;
1141
1142 if (!area->vaddr) {
1143 /* Try to map as high as possible, this is only a hint. */
1144 area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
1145 PAGE_SIZE, 0, 0);
1146 if (area->vaddr & ~PAGE_MASK) {
1147 ret = area->vaddr;
1148 goto fail;
1149 }
1150 }
1151
1152 ret = install_special_mapping(mm, area->vaddr, PAGE_SIZE,
1153 VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO, &area->page);
1154 if (ret)
1155 goto fail;
1156
1157 smp_wmb(); /* pairs with get_xol_area() */
1158 mm->uprobes_state.xol_area = area;
1159 fail:
1160 up_write(&mm->mmap_sem);
1161
1162 return ret;
1163 }
1164
1165 static struct xol_area *__create_xol_area(unsigned long vaddr)
1166 {
1167 struct mm_struct *mm = current->mm;
1168 uprobe_opcode_t insn = UPROBE_SWBP_INSN;
1169 struct xol_area *area;
1170
1171 area = kmalloc(sizeof(*area), GFP_KERNEL);
1172 if (unlikely(!area))
1173 goto out;
1174
1175 area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
1176 if (!area->bitmap)
1177 goto free_area;
1178
1179 area->page = alloc_page(GFP_HIGHUSER);
1180 if (!area->page)
1181 goto free_bitmap;
1182
1183 area->vaddr = vaddr;
1184 init_waitqueue_head(&area->wq);
1185 /* Reserve the 1st slot for get_trampoline_vaddr() */
1186 set_bit(0, area->bitmap);
1187 atomic_set(&area->slot_count, 1);
1188 copy_to_page(area->page, 0, &insn, UPROBE_SWBP_INSN_SIZE);
1189
1190 if (!xol_add_vma(mm, area))
1191 return area;
1192
1193 __free_page(area->page);
1194 free_bitmap:
1195 kfree(area->bitmap);
1196 free_area:
1197 kfree(area);
1198 out:
1199 return NULL;
1200 }
1201
1202 /*
1203 * get_xol_area - Allocate process's xol_area if necessary.
1204 * This area will be used for storing instructions for execution out of line.
1205 *
1206 * Returns the allocated area or NULL.
1207 */
1208 static struct xol_area *get_xol_area(void)
1209 {
1210 struct mm_struct *mm = current->mm;
1211 struct xol_area *area;
1212
1213 if (!mm->uprobes_state.xol_area)
1214 __create_xol_area(0);
1215
1216 area = mm->uprobes_state.xol_area;
1217 smp_read_barrier_depends(); /* pairs with wmb in xol_add_vma() */
1218 return area;
1219 }
1220
1221 /*
1222 * uprobe_clear_state - Free the area allocated for slots.
1223 */
1224 void uprobe_clear_state(struct mm_struct *mm)
1225 {
1226 struct xol_area *area = mm->uprobes_state.xol_area;
1227
1228 if (!area)
1229 return;
1230
1231 put_page(area->page);
1232 kfree(area->bitmap);
1233 kfree(area);
1234 }
1235
1236 void uprobe_start_dup_mmap(void)
1237 {
1238 percpu_down_read(&dup_mmap_sem);
1239 }
1240
1241 void uprobe_end_dup_mmap(void)
1242 {
1243 percpu_up_read(&dup_mmap_sem);
1244 }
1245
1246 void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
1247 {
1248 newmm->uprobes_state.xol_area = NULL;
1249
1250 if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
1251 set_bit(MMF_HAS_UPROBES, &newmm->flags);
1252 /* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
1253 set_bit(MMF_RECALC_UPROBES, &newmm->flags);
1254 }
1255 }
1256
1257 /*
1258  * - search for a free slot: scan the bitmap and claim a slot atomically;
1259  *   if all slots are busy, wait on area->wq until one is released.
1259 */
1260 static unsigned long xol_take_insn_slot(struct xol_area *area)
1261 {
1262 unsigned long slot_addr;
1263 int slot_nr;
1264
1265 do {
1266 slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
1267 if (slot_nr < UINSNS_PER_PAGE) {
1268 if (!test_and_set_bit(slot_nr, area->bitmap))
1269 break;
1270
1271 slot_nr = UINSNS_PER_PAGE;
1272 continue;
1273 }
1274 wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
1275 } while (slot_nr >= UINSNS_PER_PAGE);
1276
1277 slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
1278 atomic_inc(&area->slot_count);
1279
1280 return slot_addr;
1281 }
1282
1283 /*
1284 * xol_get_insn_slot - allocate a slot for xol.
1285 * Returns the allocated slot address or 0.
1286 */
1287 static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
1288 {
1289 struct xol_area *area;
1290 unsigned long xol_vaddr;
1291
1292 area = get_xol_area();
1293 if (!area)
1294 return 0;
1295
1296 xol_vaddr = xol_take_insn_slot(area);
1297 if (unlikely(!xol_vaddr))
1298 return 0;
1299
1300 arch_uprobe_copy_ixol(area->page, xol_vaddr,
1301 &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
1302
1303 return xol_vaddr;
1304 }
1305
1306 /*
1307 * xol_free_insn_slot - If slot was earlier allocated by
1308 * @xol_get_insn_slot(), make the slot available for
1309 * subsequent requests.
1310 */
1311 static void xol_free_insn_slot(struct task_struct *tsk)
1312 {
1313 struct xol_area *area;
1314 unsigned long vma_end;
1315 unsigned long slot_addr;
1316
1317 if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
1318 return;
1319
1320 slot_addr = tsk->utask->xol_vaddr;
1321 if (unlikely(!slot_addr))
1322 return;
1323
1324 area = tsk->mm->uprobes_state.xol_area;
1325 vma_end = area->vaddr + PAGE_SIZE;
1326 if (area->vaddr <= slot_addr && slot_addr < vma_end) {
1327 unsigned long offset;
1328 int slot_nr;
1329
1330 offset = slot_addr - area->vaddr;
1331 slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
1332 if (slot_nr >= UINSNS_PER_PAGE)
1333 return;
1334
1335 clear_bit(slot_nr, area->bitmap);
1336 atomic_dec(&area->slot_count);
1337 if (waitqueue_active(&area->wq))
1338 wake_up(&area->wq);
1339
1340 tsk->utask->xol_vaddr = 0;
1341 }
1342 }
1343
1344 void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
1345 void *src, unsigned long len)
1346 {
1347 /* Initialize the slot */
1348 copy_to_page(page, vaddr, src, len);
1349
1350 /*
1351 * We probably need flush_icache_user_range() but it needs vma.
1352 * This should work on most of architectures by default. If
1353 * architecture needs to do something different it can define
1354 * its own version of the function.
1355 */
1356 flush_dcache_page(page);
1357 }
1358
1359 /**
1360 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
1361 * @regs: Reflects the saved state of the task after it has hit a breakpoint
1362 * instruction.
1363 * Return the address of the breakpoint instruction.
1364 */
1365 unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
1366 {
1367 return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
1368 }
1369
1370 unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
1371 {
1372 struct uprobe_task *utask = current->utask;
1373
1374 if (unlikely(utask && utask->active_uprobe))
1375 return utask->vaddr;
1376
1377 return instruction_pointer(regs);
1378 }
1379
1380 /*
1381 * Called with no locks held.
1382  * Called in the context of an exiting or an exec-ing thread.
1383 */
1384 void uprobe_free_utask(struct task_struct *t)
1385 {
1386 struct uprobe_task *utask = t->utask;
1387 struct return_instance *ri, *tmp;
1388
1389 if (!utask)
1390 return;
1391
1392 if (utask->active_uprobe)
1393 put_uprobe(utask->active_uprobe);
1394
1395 ri = utask->return_instances;
1396 while (ri) {
1397 tmp = ri;
1398 ri = ri->next;
1399
1400 put_uprobe(tmp->uprobe);
1401 kfree(tmp);
1402 }
1403
1404 xol_free_insn_slot(t);
1405 kfree(utask);
1406 t->utask = NULL;
1407 }
1408
1409 /*
1410  * Allocate a uprobe_task object for the task if necessary.
1411 * Called when the thread hits a breakpoint.
1412 *
1413 * Returns:
1414 * - pointer to new uprobe_task on success
1415 * - NULL otherwise
1416 */
1417 static struct uprobe_task *get_utask(void)
1418 {
1419 if (!current->utask)
1420 current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
1421 return current->utask;
1422 }
1423
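/*
 * Duplicate the parent's return_instance stack into a new utask for the
 * child, taking a reference on each uprobe. Used on fork/clone when the
 * parent has pending uretprobes.
 */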
1424 static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
1425 {
1426 struct uprobe_task *n_utask;
1427 struct return_instance **p, *o, *n;
1428
1429 n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
1430 if (!n_utask)
1431 return -ENOMEM;
1432 t->utask = n_utask;
1433
1434 p = &n_utask->return_instances;
1435 for (o = o_utask->return_instances; o; o = o->next) {
1436 n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
1437 if (!n)
1438 return -ENOMEM;
1439
1440 *n = *o;
1441 atomic_inc(&n->uprobe->ref);
1442 n->next = NULL;
1443
1444 *p = n;
1445 p = &n->next;
1446 n_utask->depth++;
1447 }
1448
1449 return 0;
1450 }
1451
1452 static void uprobe_warn(struct task_struct *t, const char *msg)
1453 {
1454 pr_warn("uprobe: %s:%d failed to %s\n",
1455 current->comm, current->pid, msg);
1456 }
1457
1458 static void dup_xol_work(struct callback_head *work)
1459 {
1460 if (current->flags & PF_EXITING)
1461 return;
1462
1463 if (!__create_xol_area(current->utask->dup_xol_addr))
1464 uprobe_warn(current, "dup xol area");
1465 }
1466
1467 /*
1468  * Called in the context of a new clone/fork from copy_process().
1469 */
1470 void uprobe_copy_process(struct task_struct *t, unsigned long flags)
1471 {
1472 struct uprobe_task *utask = current->utask;
1473 struct mm_struct *mm = current->mm;
1474 struct xol_area *area;
1475
1476 t->utask = NULL;
1477
1478 if (!utask || !utask->return_instances)
1479 return;
1480
1481 if (mm == t->mm && !(flags & CLONE_VFORK))
1482 return;
1483
1484 if (dup_utask(t, utask))
1485 return uprobe_warn(t, "dup ret instances");
1486
1487 /* The task can fork() after dup_xol_work() fails */
1488 area = mm->uprobes_state.xol_area;
1489 if (!area)
1490 return uprobe_warn(t, "dup xol area");
1491
1492 if (mm == t->mm)
1493 return;
1494
1495 t->utask->dup_xol_addr = area->vaddr;
1496 init_task_work(&t->utask->dup_xol_work, dup_xol_work);
1497 task_work_add(t, &t->utask->dup_xol_work, true);
1498 }
1499
1500 /*
1501  * The current area->vaddr notion assumes the trampoline address is
1502  * always equal to area->vaddr.
1503 *
1504 * Returns -1 in case the xol_area is not allocated.
1505 */
1506 static unsigned long get_trampoline_vaddr(void)
1507 {
1508 struct xol_area *area;
1509 unsigned long trampoline_vaddr = -1;
1510
1511 area = current->mm->uprobes_state.xol_area;
1512 smp_read_barrier_depends();
1513 if (area)
1514 trampoline_vaddr = area->vaddr;
1515
1516 return trampoline_vaddr;
1517 }
1518
1519 static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
1520 {
1521 struct return_instance *ri;
1522 struct uprobe_task *utask;
1523 unsigned long orig_ret_vaddr, trampoline_vaddr;
1524 bool chained = false;
1525
1526 if (!get_xol_area())
1527 return;
1528
1529 utask = get_utask();
1530 if (!utask)
1531 return;
1532
1533 if (utask->depth >= MAX_URETPROBE_DEPTH) {
1534 printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
1535 " nestedness limit pid/tgid=%d/%d\n",
1536 current->pid, current->tgid);
1537 return;
1538 }
1539
1540 ri = kzalloc(sizeof(struct return_instance), GFP_KERNEL);
1541 if (!ri)
1542 goto fail;
1543
1544 trampoline_vaddr = get_trampoline_vaddr();
1545 orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
1546 if (orig_ret_vaddr == -1)
1547 goto fail;
1548
1549 /*
1550 	 * We don't want to keep the trampoline address on the stack, rather we keep
1551 	 * the original return address of the first caller through all the subsequent
1552 	 * instances. This also makes breakpoint unwinding easier.
1553 */
1554 if (orig_ret_vaddr == trampoline_vaddr) {
1555 if (!utask->return_instances) {
1556 /*
1557 * This situation is not possible. Likely we have an
1558 * attack from user-space.
1559 */
1560 pr_warn("uprobe: unable to set uretprobe pid/tgid=%d/%d\n",
1561 current->pid, current->tgid);
1562 goto fail;
1563 }
1564
1565 chained = true;
1566 orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
1567 }
1568
1569 atomic_inc(&uprobe->ref);
1570 ri->uprobe = uprobe;
1571 ri->func = instruction_pointer(regs);
1572 ri->orig_ret_vaddr = orig_ret_vaddr;
1573 ri->chained = chained;
1574
1575 utask->depth++;
1576
1577 /* add instance to the stack */
1578 ri->next = utask->return_instances;
1579 utask->return_instances = ri;
1580
1581 return;
1582
1583 fail:
1584 kfree(ri);
1585 }
1586
1587 /* Prepare to single-step probed instruction out of line. */
1588 static int
1589 pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
1590 {
1591 struct uprobe_task *utask;
1592 unsigned long xol_vaddr;
1593 int err;
1594
1595 utask = get_utask();
1596 if (!utask)
1597 return -ENOMEM;
1598
1599 xol_vaddr = xol_get_insn_slot(uprobe);
1600 if (!xol_vaddr)
1601 return -ENOMEM;
1602
1603 utask->xol_vaddr = xol_vaddr;
1604 utask->vaddr = bp_vaddr;
1605
1606 err = arch_uprobe_pre_xol(&uprobe->arch, regs);
1607 if (unlikely(err)) {
1608 xol_free_insn_slot(current);
1609 return err;
1610 }
1611
1612 utask->active_uprobe = uprobe;
1613 utask->state = UTASK_SSTEP;
1614 return 0;
1615 }
1616
1617 /*
1618 * If we are singlestepping, then ensure this thread is not connected to
1619 * non-fatal signals until completion of singlestep. When xol insn itself
1620 * triggers the signal, restart the original insn even if the task is
1621 * already SIGKILL'ed (since coredump should report the correct ip). This
1622  * is even more important if the task has a handler for SIGSEGV/etc. The
1623 * _same_ instruction should be repeated again after return from the signal
1624 * handler, and SSTEP can never finish in this case.
1625 */
1626 bool uprobe_deny_signal(void)
1627 {
1628 struct task_struct *t = current;
1629 struct uprobe_task *utask = t->utask;
1630
1631 if (likely(!utask || !utask->active_uprobe))
1632 return false;
1633
1634 WARN_ON_ONCE(utask->state != UTASK_SSTEP);
1635
1636 if (signal_pending(t)) {
1637 spin_lock_irq(&t->sighand->siglock);
1638 clear_tsk_thread_flag(t, TIF_SIGPENDING);
1639 spin_unlock_irq(&t->sighand->siglock);
1640
1641 if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
1642 utask->state = UTASK_SSTEP_TRAPPED;
1643 set_tsk_thread_flag(t, TIF_UPROBE);
1644 }
1645 }
1646
1647 return true;
1648 }
1649
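/*
 * Re-check every vma in @mm and clear MMF_HAS_UPROBES if none of them
 * contains a breakpoint any more.
 */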
1650 static void mmf_recalc_uprobes(struct mm_struct *mm)
1651 {
1652 struct vm_area_struct *vma;
1653
1654 for (vma = mm->mmap; vma; vma = vma->vm_next) {
1655 if (!valid_vma(vma, false))
1656 continue;
1657 /*
1658 * This is not strictly accurate, we can race with
1659 * uprobe_unregister() and see the already removed
1660 * uprobe if delete_uprobe() was not yet called.
1661 * Or this uprobe can be filtered out.
1662 */
1663 if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
1664 return;
1665 }
1666
1667 clear_bit(MMF_HAS_UPROBES, &mm->flags);
1668 }
1669
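/*
 * Read the opcode currently mapped at @vaddr (first via a pagefault-disabled
 * user-space copy, falling back to get_user_pages) and report whether it is
 * a trap instruction.
 */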
1670 static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
1671 {
1672 struct page *page;
1673 uprobe_opcode_t opcode;
1674 int result;
1675
1676 pagefault_disable();
1677 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
1678 sizeof(opcode));
1679 pagefault_enable();
1680
1681 if (likely(result == 0))
1682 goto out;
1683
1684 result = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
1685 if (result < 0)
1686 return result;
1687
1688 copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
1689 put_page(page);
1690 out:
1691 /* This needs to return true for any variant of the trap insn */
1692 return is_trap_insn(&opcode);
1693 }
1694
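/*
 * Look up the uprobe registered for the address @bp_vaddr the task trapped
 * on. If no uprobe is found, *is_swbp tells the caller whether a breakpoint
 * instruction is nevertheless present there, or -EFAULT if the address is
 * not mapped.
 */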
1695 static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
1696 {
1697 struct mm_struct *mm = current->mm;
1698 struct uprobe *uprobe = NULL;
1699 struct vm_area_struct *vma;
1700
1701 down_read(&mm->mmap_sem);
1702 vma = find_vma(mm, bp_vaddr);
1703 if (vma && vma->vm_start <= bp_vaddr) {
1704 if (valid_vma(vma, false)) {
1705 struct inode *inode = file_inode(vma->vm_file);
1706 loff_t offset = vaddr_to_offset(vma, bp_vaddr);
1707
1708 uprobe = find_uprobe(inode, offset);
1709 }
1710
1711 if (!uprobe)
1712 *is_swbp = is_trap_at_addr(mm, bp_vaddr);
1713 } else {
1714 *is_swbp = -EFAULT;
1715 }
1716
1717 if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
1718 mmf_recalc_uprobes(mm);
1719 up_read(&mm->mmap_sem);
1720
1721 return uprobe;
1722 }
1723
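/*
 * Run the ->handler() of every consumer. If every consumer's handler
 * returned UPROBE_HANDLER_REMOVE, the breakpoints are removed from
 * current->mm; if any consumer has a ->ret_handler() (and the probe is not
 * being removed), a return probe is armed via prepare_uretprobe().
 */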
1724 static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
1725 {
1726 struct uprobe_consumer *uc;
1727 int remove = UPROBE_HANDLER_REMOVE;
1728 bool need_prep = false; /* prepare return uprobe, when needed */
1729
1730 down_read(&uprobe->register_rwsem);
1731 for (uc = uprobe->consumers; uc; uc = uc->next) {
1732 int rc = 0;
1733
1734 if (uc->handler) {
1735 rc = uc->handler(uc, regs);
1736 WARN(rc & ~UPROBE_HANDLER_MASK,
1737 "bad rc=0x%x from %pf()\n", rc, uc->handler);
1738 }
1739
1740 if (uc->ret_handler)
1741 need_prep = true;
1742
1743 remove &= rc;
1744 }
1745
1746 if (need_prep && !remove)
1747 prepare_uretprobe(uprobe, regs); /* put bp at return */
1748
1749 if (remove && uprobe->consumers) {
1750 WARN_ON(!uprobe_is_active(uprobe));
1751 unapply_uprobe(uprobe, current->mm);
1752 }
1753 up_read(&uprobe->register_rwsem);
1754 }
1755
1756 static void
1757 handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
1758 {
1759 struct uprobe *uprobe = ri->uprobe;
1760 struct uprobe_consumer *uc;
1761
1762 down_read(&uprobe->register_rwsem);
1763 for (uc = uprobe->consumers; uc; uc = uc->next) {
1764 if (uc->ret_handler)
1765 uc->ret_handler(uc, ri->func, regs);
1766 }
1767 up_read(&uprobe->register_rwsem);
1768 }
1769
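/*
 * Called when the task hits the breakpoint at the trampoline address:
 * restore the original return address, run the ret_handlers of the pending
 * return_instances that share this return address (chained instances), and
 * pop them off the utask stack.
 */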
1770 static bool handle_trampoline(struct pt_regs *regs)
1771 {
1772 struct uprobe_task *utask;
1773 struct return_instance *ri, *tmp;
1774 bool chained;
1775
1776 utask = current->utask;
1777 if (!utask)
1778 return false;
1779
1780 ri = utask->return_instances;
1781 if (!ri)
1782 return false;
1783
1784 /*
1785 * TODO: we should throw out return_instance's invalidated by
1786 * longjmp(), currently we assume that the probed function always
1787 * returns.
1788 */
1789 instruction_pointer_set(regs, ri->orig_ret_vaddr);
1790
1791 for (;;) {
1792 handle_uretprobe_chain(ri, regs);
1793
1794 chained = ri->chained;
1795 put_uprobe(ri->uprobe);
1796
1797 tmp = ri;
1798 ri = ri->next;
1799 kfree(tmp);
1800 utask->depth--;
1801
1802 if (!chained)
1803 break;
1804 BUG_ON(!ri);
1805 }
1806
1807 utask->return_instances = ri;
1808
1809 return true;
1810 }
1811
1812 bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
1813 {
1814 return false;
1815 }
1816
1817 /*
1818 * Run handler and ask thread to singlestep.
1819 * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
1820 */
1821 static void handle_swbp(struct pt_regs *regs)
1822 {
1823 struct uprobe *uprobe;
1824 unsigned long bp_vaddr;
1825 int uninitialized_var(is_swbp);
1826
1827 bp_vaddr = uprobe_get_swbp_addr(regs);
1828 if (bp_vaddr == get_trampoline_vaddr()) {
1829 if (handle_trampoline(regs))
1830 return;
1831
1832 pr_warn("uprobe: unable to handle uretprobe pid/tgid=%d/%d\n",
1833 current->pid, current->tgid);
1834 }
1835
1836 uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
1837 if (!uprobe) {
1838 if (is_swbp > 0) {
1839 /* No matching uprobe; signal SIGTRAP. */
1840 send_sig(SIGTRAP, current, 0);
1841 } else {
1842 /*
1843 * Either we raced with uprobe_unregister() or we can't
1844 * access this memory. The latter is only possible if
1845 * another thread plays with our ->mm. In both cases
1846 * we can simply restart. If this vma was unmapped we
1847 * can pretend this insn was not executed yet and get
1848 * the (correct) SIGSEGV after restart.
1849 */
1850 instruction_pointer_set(regs, bp_vaddr);
1851 }
1852 return;
1853 }
1854
1855 /* change it in advance for ->handler() and restart */
1856 instruction_pointer_set(regs, bp_vaddr);
1857
1858 /*
1859 * TODO: move copy_insn/etc into _register and remove this hack.
1860 * After we hit the bp, _unregister + _register can install the
1861 * new and not-yet-analyzed uprobe at the same address, restart.
1862 */
1863 smp_rmb(); /* pairs with wmb() in install_breakpoint() */
1864 if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
1865 goto out;
1866
1867 /* Tracing handlers use ->utask to communicate with fetch methods */
1868 if (!get_utask())
1869 goto out;
1870
1871 if (arch_uprobe_ignore(&uprobe->arch, regs))
1872 goto out;
1873
1874 handler_chain(uprobe, regs);
1875
1876 if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
1877 goto out;
1878
1879 if (!pre_ssout(uprobe, regs, bp_vaddr))
1880 return;
1881
1882 /* arch_uprobe_skip_sstep() succeeded, or restart if can't singlestep */
1883 out:
1884 put_uprobe(uprobe);
1885 }
1886
1887 /*
1888 * Perform required fix-ups and disable singlestep.
1889 * Allow pending signals to take effect.
1890 */
1891 static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
1892 {
1893 struct uprobe *uprobe;
1894 int err = 0;
1895
1896 uprobe = utask->active_uprobe;
1897 if (utask->state == UTASK_SSTEP_ACK)
1898 err = arch_uprobe_post_xol(&uprobe->arch, regs);
1899 else if (utask->state == UTASK_SSTEP_TRAPPED)
1900 arch_uprobe_abort_xol(&uprobe->arch, regs);
1901 else
1902 WARN_ON_ONCE(1);
1903
1904 put_uprobe(uprobe);
1905 utask->active_uprobe = NULL;
1906 utask->state = UTASK_RUNNING;
1907 xol_free_insn_slot(current);
1908
1909 	spin_lock_irq(&current->sighand->siglock);
1910 recalc_sigpending(); /* see uprobe_deny_signal() */
1911 	spin_unlock_irq(&current->sighand->siglock);
1912
1913 if (unlikely(err)) {
1914 uprobe_warn(current, "execute the probed insn, sending SIGILL.");
1915 force_sig_info(SIGILL, SEND_SIG_FORCED, current);
1916 }
1917 }
1918
1919 /*
1920 * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
1921 * allows the thread to return from interrupt. After that handle_swbp()
1922 * sets utask->active_uprobe.
1923 *
1924 * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
1925 * and allows the thread to return from interrupt.
1926 *
1927 * While returning to userspace, thread notices the TIF_UPROBE flag and calls
1928 * uprobe_notify_resume().
1929 */
1930 void uprobe_notify_resume(struct pt_regs *regs)
1931 {
1932 struct uprobe_task *utask;
1933
1934 clear_thread_flag(TIF_UPROBE);
1935
1936 utask = current->utask;
1937 if (utask && utask->active_uprobe)
1938 handle_singlestep(utask, regs);
1939 else
1940 handle_swbp(regs);
1941 }
1942
1943 /*
1944 * uprobe_pre_sstep_notifier gets called from interrupt context as part of
1945 * notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
1946 */
1947 int uprobe_pre_sstep_notifier(struct pt_regs *regs)
1948 {
1949 if (!current->mm)
1950 return 0;
1951
1952 	if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
1953 (!current->utask || !current->utask->return_instances))
1954 return 0;
1955
1956 set_thread_flag(TIF_UPROBE);
1957 return 1;
1958 }
1959
1960 /*
1961 * uprobe_post_sstep_notifier gets called in interrupt context as part of notifier
1962 * mechanism. Set TIF_UPROBE flag and indicate completion of singlestep.
1963 */
1964 int uprobe_post_sstep_notifier(struct pt_regs *regs)
1965 {
1966 struct uprobe_task *utask = current->utask;
1967
1968 if (!current->mm || !utask || !utask->active_uprobe)
1969 /* task is currently not uprobed */
1970 return 0;
1971
1972 utask->state = UTASK_SSTEP_ACK;
1973 set_thread_flag(TIF_UPROBE);
1974 return 1;
1975 }
1976
1977 static struct notifier_block uprobe_exception_nb = {
1978 .notifier_call = arch_uprobe_exception_notify,
1979 .priority = INT_MAX-1, /* notified after kprobes, kgdb */
1980 };
1981
1982 static int __init init_uprobes(void)
1983 {
1984 int i;
1985
1986 for (i = 0; i < UPROBES_HASH_SZ; i++)
1987 mutex_init(&uprobes_mmap_mutex[i]);
1988
1989 if (percpu_init_rwsem(&dup_mmap_sem))
1990 return -ENOMEM;
1991
1992 return register_die_notifier(&uprobe_exception_nb);
1993 }
1994 __initcall(init_uprobes);
1995