Lines Matching refs:op

388 struct optimized_kprobe *op; in free_aggr_kprobe() local
390 op = container_of(p, struct optimized_kprobe, kp); in free_aggr_kprobe()
391 arch_remove_optimized_kprobe(op); in free_aggr_kprobe()
393 kfree(op); in free_aggr_kprobe()
399 struct optimized_kprobe *op; in kprobe_optready() local
402 op = container_of(p, struct optimized_kprobe, kp); in kprobe_optready()
403 return arch_prepared_optinsn(&op->optinsn); in kprobe_optready()
412 struct optimized_kprobe *op; in kprobe_disarmed() local
418 op = container_of(p, struct optimized_kprobe, kp); in kprobe_disarmed()
420 return kprobe_disabled(p) && list_empty(&op->list); in kprobe_disarmed()
426 struct optimized_kprobe *op; in kprobe_queued() local
429 op = container_of(p, struct optimized_kprobe, kp); in kprobe_queued()
430 if (!list_empty(&op->list)) in kprobe_queued()
444 struct optimized_kprobe *op; in get_optimized_kprobe() local
451 op = container_of(p, struct optimized_kprobe, kp); in get_optimized_kprobe()
452 if (arch_within_optimized_kprobe(op, addr)) in get_optimized_kprobe()
501 struct optimized_kprobe *op, *tmp; in do_unoptimize_kprobes() local
513 list_for_each_entry_safe(op, tmp, &freeing_list, list) { in do_unoptimize_kprobes()
515 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; in do_unoptimize_kprobes()
517 if (kprobe_disabled(&op->kp)) in do_unoptimize_kprobes()
518 arch_disarm_kprobe(&op->kp); in do_unoptimize_kprobes()
519 if (kprobe_unused(&op->kp)) { in do_unoptimize_kprobes()
525 hlist_del_rcu(&op->kp.hlist); in do_unoptimize_kprobes()
527 list_del_init(&op->list); in do_unoptimize_kprobes()
534 struct optimized_kprobe *op, *tmp; in do_free_cleaned_kprobes() local
536 list_for_each_entry_safe(op, tmp, &freeing_list, list) { in do_free_cleaned_kprobes()
537 list_del_init(&op->list); in do_free_cleaned_kprobes()
538 if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) { in do_free_cleaned_kprobes()
545 free_aggr_kprobe(&op->kp); in do_free_cleaned_kprobes()
617 bool optprobe_queued_unopt(struct optimized_kprobe *op) in optprobe_queued_unopt() argument
622 if (op == _op) in optprobe_queued_unopt()
632 struct optimized_kprobe *op; in optimize_kprobe() local
643 op = container_of(p, struct optimized_kprobe, kp); in optimize_kprobe()
646 if (arch_check_optimized_kprobe(op) < 0) in optimize_kprobe()
650 if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) { in optimize_kprobe()
651 if (optprobe_queued_unopt(op)) { in optimize_kprobe()
653 list_del_init(&op->list); in optimize_kprobe()
657 op->kp.flags |= KPROBE_FLAG_OPTIMIZED; in optimize_kprobe()
660 if (WARN_ON_ONCE(!list_empty(&op->list))) in optimize_kprobe()
663 list_add(&op->list, &optimizing_list); in optimize_kprobe()
668 static void force_unoptimize_kprobe(struct optimized_kprobe *op) in force_unoptimize_kprobe() argument
671 arch_unoptimize_kprobe(op); in force_unoptimize_kprobe()
672 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; in force_unoptimize_kprobe()
673 if (kprobe_disabled(&op->kp)) in force_unoptimize_kprobe()
674 arch_disarm_kprobe(&op->kp); in force_unoptimize_kprobe()
680 struct optimized_kprobe *op; in unoptimize_kprobe() local
685 op = container_of(p, struct optimized_kprobe, kp); in unoptimize_kprobe()
689 if (!list_empty(&op->list)) { in unoptimize_kprobe()
690 if (optprobe_queued_unopt(op)) { in unoptimize_kprobe()
697 force_unoptimize_kprobe(op); in unoptimize_kprobe()
698 list_move(&op->list, &freeing_list); in unoptimize_kprobe()
702 list_del_init(&op->list); in unoptimize_kprobe()
703 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; in unoptimize_kprobe()
711 force_unoptimize_kprobe(op); in unoptimize_kprobe()
713 list_add(&op->list, &unoptimizing_list); in unoptimize_kprobe()
721 struct optimized_kprobe *op; in reuse_unused_kprobe() local
727 op = container_of(ap, struct optimized_kprobe, kp); in reuse_unused_kprobe()
728 WARN_ON_ONCE(list_empty(&op->list)); in reuse_unused_kprobe()
742 struct optimized_kprobe *op; in kill_optimized_kprobe() local
744 op = container_of(p, struct optimized_kprobe, kp); in kill_optimized_kprobe()
745 if (!list_empty(&op->list)) in kill_optimized_kprobe()
747 list_del_init(&op->list); in kill_optimized_kprobe()
748 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; in kill_optimized_kprobe()
752 list_add(&op->list, &freeing_list); in kill_optimized_kprobe()
758 hlist_del_rcu(&op->kp.hlist); in kill_optimized_kprobe()
762 arch_remove_optimized_kprobe(op); in kill_optimized_kprobe()
766 void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p) in __prepare_optimized_kprobe() argument
769 arch_prepare_optimized_kprobe(op, p); in __prepare_optimized_kprobe()
775 struct optimized_kprobe *op; in prepare_optimized_kprobe() local
777 op = container_of(p, struct optimized_kprobe, kp); in prepare_optimized_kprobe()
778 __prepare_optimized_kprobe(op, p); in prepare_optimized_kprobe()
784 struct optimized_kprobe *op; in alloc_aggr_kprobe() local
786 op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL); in alloc_aggr_kprobe()
787 if (!op) in alloc_aggr_kprobe()
790 INIT_LIST_HEAD(&op->list); in alloc_aggr_kprobe()
791 op->kp.addr = p->addr; in alloc_aggr_kprobe()
792 __prepare_optimized_kprobe(op, p); in alloc_aggr_kprobe()
794 return &op->kp; in alloc_aggr_kprobe()
806 struct optimized_kprobe *op; in try_to_optimize_kprobe() local
821 op = container_of(ap, struct optimized_kprobe, kp); in try_to_optimize_kprobe()
822 if (!arch_prepared_optinsn(&op->optinsn)) { in try_to_optimize_kprobe()
824 arch_remove_optimized_kprobe(op); in try_to_optimize_kprobe()
825 kfree(op); in try_to_optimize_kprobe()
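
Almost every hit above follows the same pattern: the caller holds a 'struct kprobe *' and recovers the enclosing 'struct optimized_kprobe' with container_of() on the embedded 'kp' member. Below is a minimal user-space sketch of that pattern; the struct fields and the simplified container_of() macro are assumptions for illustration, not the definitions used in kernel/kprobes.c.

/*
 * Minimal user-space sketch of the container_of() pattern seen in the
 * hits above. Struct layouts and the simplified container_of() macro
 * are illustrative assumptions, not the kernel's own definitions.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct kprobe {
        void *addr;
        unsigned int flags;
};

struct optimized_kprobe {
        struct kprobe kp;   /* embedded member, like 'kp' in kernel/kprobes.c */
        int optinsn;        /* stand-in for the real arch-specific optinsn slot */
};

int main(void)
{
        struct optimized_kprobe op = { .kp.addr = (void *)0x1234, .optinsn = 1 };
        struct kprobe *p = &op.kp;  /* callers typically hold only this pointer */

        /* Recover the enclosing structure, as optimize_kprobe() and friends do. */
        struct optimized_kprobe *found = container_of(p, struct optimized_kprobe, kp);

        printf("outer=%p optinsn=%d\n", (void *)found, found->optinsn);
        return 0;
}

This is why the listing shows 'op = container_of(p, struct optimized_kprobe, kp);' repeated in so many functions: each one is handed the embedded kprobe and steps back out to the optimized_kprobe that wraps it.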