/* Support for MMIO probes.
 * Benefits from much of the kprobes code.
 * (C) 2002 Louis Zhuang <louis.zhuang@intel.com>.
 *     2007 Alexander Eichner
 *     2008 Pekka Paalanen <pq@iki.fi>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/errno.h>
#include <asm/debugreg.h>
#include <linux/mmiotrace.h>

#define KMMIO_PAGE_HASH_BITS 4
#define KMMIO_PAGE_TABLE_SIZE (1 << KMMIO_PAGE_HASH_BITS)

struct kmmio_fault_page {
	struct list_head list;
	struct kmmio_fault_page *release_next;
	unsigned long addr; /* the requested address */
	pteval_t old_presence; /* page presence prior to arming */
	bool armed;

	/*
	 * Number of times this page has been registered as a part
	 * of a probe. If zero, page is disarmed and this may be freed.
	 * Used only by writers (RCU) and post_kmmio_handler().
	 * Protected by kmmio_lock, when linked into kmmio_page_table.
	 */
	int count;

	bool scheduled_for_release;
};

struct kmmio_delayed_release {
	struct rcu_head rcu;
	struct kmmio_fault_page *release_list;
};

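/*
 * Per-cpu state of an in-flight kmmio hit: the fault page and probe that
 * matched in kmmio_handler(), the saved TF/IF flags, and the page base
 * address. It stays active from the page fault until the matching debug
 * trap is handled in post_kmmio_handler().
 */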
struct kmmio_context {
	struct kmmio_fault_page *fpage;
	struct kmmio_probe *probe;
	unsigned long saved_flags;
	unsigned long addr;
	int active;
};

static DEFINE_SPINLOCK(kmmio_lock);

/* Protected by kmmio_lock */
unsigned int kmmio_count;

/* Read-protected by RCU, write-protected by kmmio_lock. */
static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);

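/*
 * Return the hash bucket in kmmio_page_table for the page (of whatever
 * level) that contains addr, or NULL if addr is not mapped at all.
 */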
static struct list_head *kmmio_page_list(unsigned long addr)
{
	unsigned int l;
	pte_t *pte = lookup_address(addr, &l);

	if (!pte)
		return NULL;
	addr &= page_level_mask(l);

	return &kmmio_page_table[hash_long(addr, KMMIO_PAGE_HASH_BITS)];
}

/* Accessed per-cpu */
static DEFINE_PER_CPU(struct kmmio_context, kmmio_ctx);

/*
 * This is basically a dynamic stabbing problem: find the probe whose
 * [addr, addr + len) interval covers a given address. The current
 * implementation is a linear scan of kmmio_probes. Possible better
 * data structures:
 *  - the existing prio tree code,
 *  - "The Interval Skip List: A Data Structure for Finding All Intervals
 *    That Overlap a Point" (might be simple),
 *  - "Space Efficient Dynamic Stabbing with Fast Queries" - Mikkel Thorup.
 */
/* Get the kmmio at this addr (if any). You must be holding RCU read lock. */
static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
{
	struct kmmio_probe *p;
	list_for_each_entry_rcu(p, &kmmio_probes, list) {
		if (addr >= p->addr && addr < (p->addr + p->len))
			return p;
	}
	return NULL;
}

/* You must be holding RCU read lock. */
static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
{
	struct list_head *head;
	struct kmmio_fault_page *f;
	unsigned int l;
	pte_t *pte = lookup_address(addr, &l);

	if (!pte)
		return NULL;
	addr &= page_level_mask(l);
	head = kmmio_page_list(addr);
	list_for_each_entry_rcu(f, head, list) {
		if (f->addr == addr)
			return f;
	}
	return NULL;
}

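/*
 * The following two helpers save and clear, or restore, the present bit of
 * a single mapping: clear_pmd_presence() for a 2M page, clear_pte_presence()
 * for a 4K page. With clear == true the old value is stored in *old; with
 * clear == false the previously saved value is put back.
 */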
static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old)
{
	pmd_t new_pmd;
	pmdval_t v = pmd_val(*pmd);
	if (clear) {
		*old = v;
		new_pmd = pmd_mknotpresent(*pmd);
	} else {
		/* Presume this has been called with clear==true previously */
		new_pmd = __pmd(*old);
	}
	set_pmd(pmd, new_pmd);
}

static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
{
	pteval_t v = pte_val(*pte);
	if (clear) {
		*old = v;
		/* Nothing should care about address */
		pte_clear(&init_mm, 0, pte);
	} else {
		/* Presume this has been called with clear==true previously */
		set_pte_atomic(pte, __pte(*old));
	}
}

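/*
 * Clear (clear == true) or restore (clear == false) the present bit of the
 * mapping backing f->addr, handling both 2M and 4K mappings, and flush the
 * TLB entry for that address. Returns 0 on success, -1 if there is no pte
 * or the page level is unexpected.
 */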
static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
{
	unsigned int level;
	pte_t *pte = lookup_address(f->addr, &level);

	if (!pte) {
		pr_err("no pte for addr 0x%08lx\n", f->addr);
		return -1;
	}

	switch (level) {
	case PG_LEVEL_2M:
		clear_pmd_presence((pmd_t *)pte, clear, &f->old_presence);
		break;
	case PG_LEVEL_4K:
		clear_pte_presence(pte, clear, &f->old_presence);
		break;
	default:
		pr_err("unexpected page level 0x%x.\n", level);
		return -1;
	}

	__flush_tlb_one(f->addr);
	return 0;
}

/*
 * Mark the given page as not present. Access to it will trigger a fault.
 *
 * Struct kmmio_fault_page is protected by RCU and kmmio_lock, but the
 * protection is ignored here. RCU read lock is assumed held, so the struct
 * will not disappear unexpectedly. Furthermore, the caller must guarantee
 * that double arming the same virtual address (page) cannot occur.
 *
 * Double disarming on the other hand is allowed, and may occur when a fault
 * and mmiotrace shutdown happen simultaneously.
 */
static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	int ret;
	WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
	if (f->armed) {
		pr_warning("double-arm: addr 0x%08lx, ref %d, old %d\n",
			   f->addr, f->count, !!f->old_presence);
	}
	ret = clear_page_presence(f, true);
	WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming at 0x%08lx failed.\n"),
		  f->addr);
	f->armed = true;
	return ret;
}

/** Restore the given page to saved presence state. */
static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	int ret = clear_page_presence(f, false);
	WARN_ONCE(ret < 0,
		  KERN_ERR "kmmio disarming at 0x%08lx failed.\n", f->addr);
	f->armed = false;
}

/*
 * This is being called from do_page_fault().
 *
 * We may be in an interrupt or a critical section. Also prefetching may
 * trigger a page fault. We may be in the middle of a process switch.
 * We cannot take any locks, because we could be executing especially
 * within a kmmio critical section.
 *
 * Local interrupts are disabled, so preemption cannot happen.
 * Do not enable interrupts, do not sleep, and watch out for other CPUs.
 */
/*
 * Interrupts are disabled on entry as the page fault is taken through an
 * interrupt gate, and they remain disabled throughout this function.
 */
int kmmio_handler(struct pt_regs *regs, unsigned long addr)
{
	struct kmmio_context *ctx;
	struct kmmio_fault_page *faultpage;
	int ret = 0; /* default to fault not handled */
	unsigned long page_base = addr;
	unsigned int l;
	pte_t *pte = lookup_address(addr, &l);
	if (!pte)
		return -EINVAL;
	page_base &= page_level_mask(l);

	/*
	 * Preemption is now disabled to prevent process switch during
	 * single stepping. We can only handle one active kmmio trace
	 * per cpu, so ensure that we finish it before something else
	 * gets to run. We also hold the RCU read lock over single
	 * stepping to avoid looking up the probe and kmmio_fault_page
	 * again.
	 */
	preempt_disable();
	rcu_read_lock();

	faultpage = get_kmmio_fault_page(page_base);
	if (!faultpage) {
		/*
		 * Either this page fault is not caused by kmmio, or
		 * another CPU just pulled the kmmio probe from under
		 * our feet. The latter case should not be possible.
		 */
		goto no_kmmio;
	}

	ctx = &get_cpu_var(kmmio_ctx);
	if (ctx->active) {
		if (page_base == ctx->addr) {
			/*
			 * A second fault on the same page means some other
			 * condition needs handling by do_page_fault(); the
			 * page really not being present is the most common.
			 */
			pr_debug("secondary hit for 0x%08lx CPU %d.\n",
				 addr, smp_processor_id());

			if (!faultpage->old_presence)
				pr_info("unexpected secondary hit for address 0x%08lx on CPU %d.\n",
					addr, smp_processor_id());
		} else {
			/*
			 * Prevent overwriting already in-flight context.
			 * This should not happen, let's hope disarming at
			 * least prevents a panic.
			 */
			pr_emerg("recursive probe hit on CPU %d, for address 0x%08lx. Ignoring.\n",
				 smp_processor_id(), addr);
			pr_emerg("previous hit was at 0x%08lx.\n", ctx->addr);
			disarm_kmmio_fault_page(faultpage);
		}
		goto no_kmmio_ctx;
	}
	ctx->active++;

	ctx->fpage = faultpage;
	ctx->probe = get_kmmio_probe(page_base);
	ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	ctx->addr = page_base;

	if (ctx->probe && ctx->probe->pre_handler)
		ctx->probe->pre_handler(ctx->probe, regs, addr);

	/*
	 * Enable single-stepping and disable interrupts for the faulting
	 * context. Local interrupts must not get enabled during stepping.
	 */
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;

	/* Now we set present bit in PTE and single step. */
	disarm_kmmio_fault_page(ctx->fpage);

	/*
	 * If another cpu accesses the same page while we are stepping,
	 * the access will not be caught. It will simply succeed and the
	 * only downside is we lose the event. If this becomes a problem,
	 * the user should drop to single cpu before tracing.
	 */

	put_cpu_var(kmmio_ctx);
	return 1; /* fault handled */

no_kmmio_ctx:
	put_cpu_var(kmmio_ctx);
no_kmmio:
	rcu_read_unlock();
	preempt_enable_no_resched();
	return ret;
}

/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate
 * and they remain disabled throughout this function.
 * This must always get called as the pair to kmmio_handler().
 */
static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
{
	int ret = 0;
	struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);

	if (!ctx->active) {
		/*
		 * Debug traps without an active context are due to either
		 * something external causing them (e.g. using a debugger
		 * while mmio tracing is enabled), or erroneous behaviour.
		 */
		pr_warning("unexpected debug trap on CPU %d.\n",
			   smp_processor_id());
		goto out;
	}

	if (ctx->probe && ctx->probe->post_handler)
		ctx->probe->post_handler(ctx->probe, condition, regs);

	/* Prevent racing against release_kmmio_fault_page(). */
	spin_lock(&kmmio_lock);
	if (ctx->fpage->count)
		arm_kmmio_fault_page(ctx->fpage);
	spin_unlock(&kmmio_lock);

	regs->flags &= ~X86_EFLAGS_TF;
	regs->flags |= ctx->saved_flags;

	/* These were acquired in kmmio_handler(). */
	ctx->active--;
	BUG_ON(ctx->active);
	rcu_read_unlock();
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (!(regs->flags & X86_EFLAGS_TF))
		ret = 1;
out:
	put_cpu_var(kmmio_ctx);
	return ret;
}

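/*
 * add_kmmio_fault_page() takes a reference on the fault page covering addr,
 * allocating and arming it on the first reference. release_kmmio_fault_page()
 * drops a reference; when the count reaches zero, the page is disarmed and
 * queued on *release_list for the delayed free performed by the RCU
 * callbacks below.
 */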
/* You must be holding kmmio_lock. */
static int add_kmmio_fault_page(unsigned long addr)
{
	struct kmmio_fault_page *f;

	f = get_kmmio_fault_page(addr);
	if (f) {
		if (!f->count)
			arm_kmmio_fault_page(f);
		f->count++;
		return 0;
	}

	f = kzalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return -1;

	f->count = 1;
	f->addr = addr;

	if (arm_kmmio_fault_page(f)) {
		kfree(f);
		return -1;
	}

	list_add_rcu(&f->list, kmmio_page_list(f->addr));

	return 0;
}

/* You must be holding kmmio_lock. */
static void release_kmmio_fault_page(unsigned long addr,
				struct kmmio_fault_page **release_list)
{
	struct kmmio_fault_page *f;

	f = get_kmmio_fault_page(addr);
	if (!f)
		return;

	f->count--;
	BUG_ON(f->count < 0);
	if (!f->count) {
		disarm_kmmio_fault_page(f);
		if (!f->scheduled_for_release) {
			f->release_next = *release_list;
			*release_list = f;
			f->scheduled_for_release = true;
		}
	}
}

/*
 * With page-unaligned ioremaps, one or two armed pages may contain
 * addresses from outside the intended mapping. Events for these addresses
 * are currently silently dropped. Such events can only result from
 * programming mistakes: accesses before the beginning or past the end of
 * a mapping.
 */
int register_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	int ret = 0;
	unsigned long size = 0;
	unsigned long addr = p->addr & PAGE_MASK;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
	unsigned int l;
	pte_t *pte;

	spin_lock_irqsave(&kmmio_lock, flags);
	if (get_kmmio_probe(addr)) {
		ret = -EEXIST;
		goto out;
	}

	pte = lookup_address(addr, &l);
	if (!pte) {
		ret = -EINVAL;
		goto out;
	}

	kmmio_count++;
	list_add_rcu(&p->list, &kmmio_probes);
	while (size < size_lim) {
		if (add_kmmio_fault_page(addr + size))
			pr_err("Unable to set page fault.\n");
		size += page_level_size(l);
	}
out:
	spin_unlock_irqrestore(&kmmio_lock, flags);
	/*
	 * XXX: What should I do here?
	 * Here was a call to global_flush_tlb(), but it does not exist
	 * anymore. It seems it's not needed after all.
	 */
	return ret;
}
EXPORT_SYMBOL(register_kmmio_probe);

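/*
 * Delayed release of fault pages, see unregister_kmmio_probe(): the pages
 * are first disarmed and collected on a release list; after an RCU grace
 * period remove_kmmio_fault_pages() unlinks them from kmmio_page_table, and
 * after a second grace period rcu_free_kmmio_fault_pages() finally frees
 * them.
 */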
static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr = container_of(
						head,
						struct kmmio_delayed_release,
						rcu);
	struct kmmio_fault_page *f = dr->release_list;
	while (f) {
		struct kmmio_fault_page *next = f->release_next;
		BUG_ON(f->count);
		kfree(f);
		f = next;
	}
	kfree(dr);
}

static void remove_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr =
		container_of(head, struct kmmio_delayed_release, rcu);
	struct kmmio_fault_page *f = dr->release_list;
	struct kmmio_fault_page **prevp = &dr->release_list;
	unsigned long flags;

	spin_lock_irqsave(&kmmio_lock, flags);
	while (f) {
		if (!f->count) {
			list_del_rcu(&f->list);
			prevp = &f->release_next;
		} else {
			*prevp = f->release_next;
			f->release_next = NULL;
			f->scheduled_for_release = false;
		}
		f = *prevp;
	}
	spin_unlock_irqrestore(&kmmio_lock, flags);

	/* This is the real RCU destroy call. */
	call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages);
}

/*
 * Remove a kmmio probe. You have to synchronize_rcu() before you can be
 * sure that the callbacks will not be called anymore. Only after that
 * you may actually release your struct kmmio_probe.
 *
 * Unregistering a kmmio fault page has three steps:
 * 1. release_kmmio_fault_page()
 *    Disarm the page, wait a grace period to let all faults finish.
 * 2. remove_kmmio_fault_pages()
 *    Remove the pages from kmmio_page_table.
 * 3. rcu_free_kmmio_fault_pages()
 *    Actually free the kmmio_fault_page structs, after another RCU grace
 *    period.
 */
void unregister_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	unsigned long size = 0;
	unsigned long addr = p->addr & PAGE_MASK;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
	struct kmmio_fault_page *release_list = NULL;
	struct kmmio_delayed_release *drelease;
	unsigned int l;
	pte_t *pte;

	pte = lookup_address(addr, &l);
	if (!pte)
		return;

	spin_lock_irqsave(&kmmio_lock, flags);
	while (size < size_lim) {
		release_kmmio_fault_page(addr + size, &release_list);
		size += page_level_size(l);
	}
	list_del_rcu(&p->list);
	kmmio_count--;
	spin_unlock_irqrestore(&kmmio_lock, flags);

	if (!release_list)
		return;

	drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
	if (!drelease) {
		pr_crit("leaking kmmio_fault_page objects.\n");
		return;
	}
	drelease->release_list = release_list;

	/*
	 * This is not really RCU here. We have just disarmed a set of
	 * pages so that they cannot trigger page faults anymore. However,
	 * we cannot remove the pages from kmmio_page_table,
	 * because a probe hit might be in flight on another CPU. The
	 * pages are collected into a list, and they will be removed from
	 * kmmio_page_table when it is certain that no probe hit related to
	 * these pages can be in flight. RCU grace period sounds like a
	 * good choice.
	 *
	 * If we removed the pages too early, kmmio page fault handler might
	 * not find the respective kmmio_fault_page and determine it's not
	 * a kmmio fault, when it actually is. This would lead to madness.
	 */
	call_rcu(&drelease->rcu, remove_kmmio_fault_pages);
}
EXPORT_SYMBOL(unregister_kmmio_probe);
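
/*
 * A minimal usage sketch. Only struct kmmio_probe's addr/len/pre_handler/
 * post_handler fields come from this API; the handler and variable names
 * below are hypothetical. The real in-tree user is mmiotrace (mmio-mod.c).
 *
 *	static void my_pre(struct kmmio_probe *p, struct pt_regs *regs,
 *			   unsigned long addr)
 *	{ ... record the access that is about to happen ... }
 *
 *	static void my_post(struct kmmio_probe *p, unsigned long condition,
 *			    struct pt_regs *regs)
 *	{ ... record the value that was read or written ... }
 *
 *	static struct kmmio_probe probe = {
 *		.addr = (unsigned long)ioremapped_base,
 *		.len = mapping_size,
 *		.pre_handler = my_pre,
 *		.post_handler = my_post,
 *	};
 *
 *	register_kmmio_probe(&probe);
 *	...
 *	unregister_kmmio_probe(&probe);
 *	synchronize_rcu();	/* no handlers can be running after this */
 *	/* only now may &probe be freed or reused */
 */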

static int
kmmio_die_notifier(struct notifier_block *nb, unsigned long val, void *args)
{
	struct die_args *arg = args;
	unsigned long *dr6_p = (unsigned long *)ERR_PTR(arg->err);

	if (val == DIE_DEBUG && (*dr6_p & DR_STEP))
		if (post_kmmio_handler(*dr6_p, arg->regs) == 1) {
			/*
			 * Reset the BS bit in dr6 (pointed by args->err) to
			 * denote completion of processing
			 */
			*dr6_p &= ~DR_STEP;
			return NOTIFY_STOP;
		}

	return NOTIFY_DONE;
}

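/*
 * nb_die hooks kmmio_die_notifier() into the die notifier chain (registered
 * in kmmio_init()) so that single-step (DR_STEP) debug traps reach
 * post_kmmio_handler().
 */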
static struct notifier_block nb_die = {
	.notifier_call = kmmio_die_notifier
};

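/*
 * Initialise the kmmio_page_table hash buckets and register the die
 * notifier; must run before any probes are registered.
 */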
int kmmio_init(void)
{
	int i;

	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++)
		INIT_LIST_HEAD(&kmmio_page_table[i]);

	return register_die_notifier(&nb_die);
}

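/*
 * Unregister the die notifier and warn if any fault pages are still in the
 * hash table; leftover entries mean further tracing would leak memory.
 */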
void kmmio_cleanup(void)
{
	int i;

	unregister_die_notifier(&nb_die);
	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++) {
		WARN_ONCE(!list_empty(&kmmio_page_table[i]),
			  KERN_ERR "kmmio_page_table not empty at cleanup, any further tracing will leak memory.\n");
	}
}