• Home
  • Raw
  • Download

Lines Matching +full:a +full:- +full:f

1 // SPDX-License-Identifier: GPL-2.0
42 * Number of times this page has been registered as a part
43 * of a probe. If zero, page is disarmed and this may be freed.
70 /* Read-protected by RCU, write-protected by kmmio_lock. */
86 /* Accessed per-cpu */
90 * this is basically a dynamic stabbing problem:
93 * The Interval Skip List: A Data Structure for Finding All Intervals That
94 * Overlap a Point (might be simple)
95 * Space Efficient Dynamic Stabbing with Fast Queries - Mikkel Thorup
102 if (addr >= p->addr && addr < (p->addr + p->len)) in get_kmmio_probe()
112 struct kmmio_fault_page *f; in get_kmmio_fault_page() local
120 list_for_each_entry_rcu(f, head, list) { in get_kmmio_fault_page()
121 if (f->addr == addr) in get_kmmio_fault_page()
122 return f; in get_kmmio_fault_page()
154 static int clear_page_presence(struct kmmio_fault_page *f, bool clear) in clear_page_presence() argument
157 pte_t *pte = lookup_address(f->addr, &level); in clear_page_presence()
160 pr_err("no pte for addr 0x%08lx\n", f->addr); in clear_page_presence()
161 return -1; in clear_page_presence()
166 clear_pmd_presence((pmd_t *)pte, clear, &f->old_presence); in clear_page_presence()
169 clear_pte_presence(pte, clear, &f->old_presence); in clear_page_presence()
173 return -1; in clear_page_presence()
176 flush_tlb_one_kernel(f->addr); in clear_page_presence()
181 * Mark the given page as not present. Access to it will trigger a fault.
188 * Double disarming on the other hand is allowed, and may occur when a fault
191 static int arm_kmmio_fault_page(struct kmmio_fault_page *f) in arm_kmmio_fault_page() argument
194 WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n")); in arm_kmmio_fault_page()
195 if (f->armed) { in arm_kmmio_fault_page()
196 pr_warn("double-arm: addr 0x%08lx, ref %d, old %d\n", in arm_kmmio_fault_page()
197 f->addr, f->count, !!f->old_presence); in arm_kmmio_fault_page()
199 ret = clear_page_presence(f, true); in arm_kmmio_fault_page()
201 f->addr); in arm_kmmio_fault_page()
202 f->armed = true; in arm_kmmio_fault_page()
207 static void disarm_kmmio_fault_page(struct kmmio_fault_page *f) in disarm_kmmio_fault_page() argument
209 int ret = clear_page_presence(f, false); in disarm_kmmio_fault_page()
211 KERN_ERR "kmmio disarming at 0x%08lx failed.\n", f->addr); in disarm_kmmio_fault_page()
212 f->armed = false; in disarm_kmmio_fault_page()
218 * We may be in an interrupt or a critical section. Also prefetching may
219 * trigger a page fault. We may be in the middle of process switch.
221 * within a kmmio critical section.
239 return -EINVAL; in kmmio_handler()
264 if (ctx->active) { in kmmio_handler()
265 if (page_base == ctx->addr) { in kmmio_handler()
267 * A second fault on the same page means some other in kmmio_handler()
274 if (!faultpage->old_presence) in kmmio_handler()
279 * Prevent overwriting already in-flight context. in kmmio_handler()
281 * least prevents a panic. in kmmio_handler()
285 pr_emerg("previous hit was at 0x%08lx.\n", ctx->addr); in kmmio_handler()
290 ctx->active++; in kmmio_handler()
292 ctx->fpage = faultpage; in kmmio_handler()
293 ctx->probe = get_kmmio_probe(page_base); in kmmio_handler()
294 ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF)); in kmmio_handler()
295 ctx->addr = page_base; in kmmio_handler()
297 if (ctx->probe && ctx->probe->pre_handler) in kmmio_handler()
298 ctx->probe->pre_handler(ctx->probe, regs, addr); in kmmio_handler()
301 * Enable single-stepping and disable interrupts for the faulting in kmmio_handler()
304 regs->flags |= X86_EFLAGS_TF; in kmmio_handler()
305 regs->flags &= ~X86_EFLAGS_IF; in kmmio_handler()
308 disarm_kmmio_fault_page(ctx->fpage); in kmmio_handler()
313 * only downside is we lose the event. If this becomes a problem, in kmmio_handler()
335 if (!ctx->active) { in post_kmmio_handler()
338 * something external causing them (e.g. using a debugger while in post_kmmio_handler()
345 if (ctx->probe && ctx->probe->post_handler) in post_kmmio_handler()
346 ctx->probe->post_handler(ctx->probe, condition, regs); in post_kmmio_handler()
350 if (ctx->fpage->count) in post_kmmio_handler()
351 arm_kmmio_fault_page(ctx->fpage); in post_kmmio_handler()
354 regs->flags &= ~X86_EFLAGS_TF; in post_kmmio_handler()
355 regs->flags |= ctx->saved_flags; in post_kmmio_handler()
358 ctx->active--; in post_kmmio_handler()
359 BUG_ON(ctx->active); in post_kmmio_handler()
364 * if somebody else is singlestepping across a probe point, flags in post_kmmio_handler()
366 * of do_debug, as if this is not a probe hit. in post_kmmio_handler()
368 if (!(regs->flags & X86_EFLAGS_TF)) in post_kmmio_handler()
377 struct kmmio_fault_page *f; in add_kmmio_fault_page() local
379 f = get_kmmio_fault_page(addr); in add_kmmio_fault_page()
380 if (f) { in add_kmmio_fault_page()
381 if (!f->count) in add_kmmio_fault_page()
382 arm_kmmio_fault_page(f); in add_kmmio_fault_page()
383 f->count++; in add_kmmio_fault_page()
387 f = kzalloc(sizeof(*f), GFP_ATOMIC); in add_kmmio_fault_page()
388 if (!f) in add_kmmio_fault_page()
389 return -1; in add_kmmio_fault_page()
391 f->count = 1; in add_kmmio_fault_page()
392 f->addr = addr; in add_kmmio_fault_page()
394 if (arm_kmmio_fault_page(f)) { in add_kmmio_fault_page()
395 kfree(f); in add_kmmio_fault_page()
396 return -1; in add_kmmio_fault_page()
399 list_add_rcu(&f->list, kmmio_page_list(f->addr)); in add_kmmio_fault_page()
408 struct kmmio_fault_page *f; in release_kmmio_fault_page() local
410 f = get_kmmio_fault_page(addr); in release_kmmio_fault_page()
411 if (!f) in release_kmmio_fault_page()
414 f->count--; in release_kmmio_fault_page()
415 BUG_ON(f->count < 0); in release_kmmio_fault_page()
416 if (!f->count) { in release_kmmio_fault_page()
417 disarm_kmmio_fault_page(f); in release_kmmio_fault_page()
418 if (!f->scheduled_for_release) { in release_kmmio_fault_page()
419 f->release_next = *release_list; in release_kmmio_fault_page()
420 *release_list = f; in release_kmmio_fault_page()
421 f->scheduled_for_release = true; in release_kmmio_fault_page()
427 * With page-unaligned ioremaps, one or two armed pages may contain
430 * mistakes by accessing addresses before the beginning or past the end of a
438 unsigned long addr = p->addr & PAGE_MASK; in register_kmmio_probe()
439 const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK); in register_kmmio_probe()
445 ret = -EEXIST; in register_kmmio_probe()
451 ret = -EINVAL; in register_kmmio_probe()
456 list_add_rcu(&p->list, &kmmio_probes); in register_kmmio_probe()
466 * Here was a call to global_flush_tlb(), but it does not exist in register_kmmio_probe()
479 struct kmmio_fault_page *f = dr->release_list; in rcu_free_kmmio_fault_pages() local
480 while (f) { in rcu_free_kmmio_fault_pages()
481 struct kmmio_fault_page *next = f->release_next; in rcu_free_kmmio_fault_pages()
482 BUG_ON(f->count); in rcu_free_kmmio_fault_pages()
483 kfree(f); in rcu_free_kmmio_fault_pages()
484 f = next; in rcu_free_kmmio_fault_pages()
493 struct kmmio_fault_page *f = dr->release_list; in remove_kmmio_fault_pages() local
494 struct kmmio_fault_page **prevp = &dr->release_list; in remove_kmmio_fault_pages()
498 while (f) { in remove_kmmio_fault_pages()
499 if (!f->count) { in remove_kmmio_fault_pages()
500 list_del_rcu(&f->list); in remove_kmmio_fault_pages()
501 prevp = &f->release_next; in remove_kmmio_fault_pages()
503 *prevp = f->release_next; in remove_kmmio_fault_pages()
504 f->release_next = NULL; in remove_kmmio_fault_pages()
505 f->scheduled_for_release = false; in remove_kmmio_fault_pages()
507 f = *prevp; in remove_kmmio_fault_pages()
512 call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages); in remove_kmmio_fault_pages()
516 * Remove a kmmio probe. You have to synchronize_rcu() before you can be
520 * Unregistering a kmmio fault page has three steps:
522 * Disarm the page, wait a grace period to let all faults finish.
532 unsigned long addr = p->addr & PAGE_MASK; in unregister_kmmio_probe()
533 const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK); in unregister_kmmio_probe()
548 list_del_rcu(&p->list); in unregister_kmmio_probe()
549 kmmio_count--; in unregister_kmmio_probe()
560 drelease->release_list = release_list; in unregister_kmmio_probe()
563 * This is not really RCU here. We have just disarmed a set of in unregister_kmmio_probe()
566 * because a probe hit might be in flight on another CPU. The in unregister_kmmio_probe()
567 * pages are collected into a list, and they will be removed from in unregister_kmmio_probe()
569 * these pages can be in flight. RCU grace period sounds like a in unregister_kmmio_probe()
574 * a kmmio fault, when it actually is. This would lead to madness. in unregister_kmmio_probe()
576 call_rcu(&drelease->rcu, remove_kmmio_fault_pages); in unregister_kmmio_probe()
584 unsigned long* dr6_p = (unsigned long *)ERR_PTR(arg->err); in kmmio_die_notifier()
587 if (post_kmmio_handler(*dr6_p, arg->regs) == 1) { in kmmio_die_notifier()
589 * Reset the BS bit in dr6 (pointed by args->err) to in kmmio_die_notifier()