
Lines matching refs: op

429 struct optimized_kprobe *op; in free_aggr_kprobe() local
431 op = container_of(p, struct optimized_kprobe, kp); in free_aggr_kprobe()
432 arch_remove_optimized_kprobe(op); in free_aggr_kprobe()
434 kfree(op); in free_aggr_kprobe()
440 struct optimized_kprobe *op; in kprobe_optready() local
443 op = container_of(p, struct optimized_kprobe, kp); in kprobe_optready()
444 return arch_prepared_optinsn(&op->optinsn); in kprobe_optready()
453 struct optimized_kprobe *op; in kprobe_disarmed() local
459 op = container_of(p, struct optimized_kprobe, kp); in kprobe_disarmed()
461 return kprobe_disabled(p) && list_empty(&op->list); in kprobe_disarmed()
467 struct optimized_kprobe *op; in kprobe_queued() local
470 op = container_of(p, struct optimized_kprobe, kp); in kprobe_queued()
471 if (!list_empty(&op->list)) in kprobe_queued()
485 struct optimized_kprobe *op; in get_optimized_kprobe() local
492 op = container_of(p, struct optimized_kprobe, kp); in get_optimized_kprobe()
493 if (arch_within_optimized_kprobe(op, addr)) in get_optimized_kprobe()
542 struct optimized_kprobe *op, *tmp; in do_unoptimize_kprobes() local
552 list_for_each_entry_safe(op, tmp, &freeing_list, list) { in do_unoptimize_kprobes()
554 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; in do_unoptimize_kprobes()
556 if (kprobe_disabled(&op->kp) && !kprobe_gone(&op->kp)) in do_unoptimize_kprobes()
557 arch_disarm_kprobe(&op->kp); in do_unoptimize_kprobes()
558 if (kprobe_unused(&op->kp)) { in do_unoptimize_kprobes()
564 hlist_del_rcu(&op->kp.hlist); in do_unoptimize_kprobes()
566 list_del_init(&op->list); in do_unoptimize_kprobes()
573 struct optimized_kprobe *op, *tmp; in do_free_cleaned_kprobes() local
575 list_for_each_entry_safe(op, tmp, &freeing_list, list) { in do_free_cleaned_kprobes()
576 list_del_init(&op->list); in do_free_cleaned_kprobes()
577 if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) { in do_free_cleaned_kprobes()
584 free_aggr_kprobe(&op->kp); in do_free_cleaned_kprobes()
653 bool optprobe_queued_unopt(struct optimized_kprobe *op) in optprobe_queued_unopt() argument
658 if (op == _op) in optprobe_queued_unopt()
668 struct optimized_kprobe *op; in optimize_kprobe() local
679 op = container_of(p, struct optimized_kprobe, kp); in optimize_kprobe()
682 if (arch_check_optimized_kprobe(op) < 0) in optimize_kprobe()
686 if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) { in optimize_kprobe()
687 if (optprobe_queued_unopt(op)) { in optimize_kprobe()
689 list_del_init(&op->list); in optimize_kprobe()
693 op->kp.flags |= KPROBE_FLAG_OPTIMIZED; in optimize_kprobe()
696 if (WARN_ON_ONCE(!list_empty(&op->list))) in optimize_kprobe()
699 list_add(&op->list, &optimizing_list); in optimize_kprobe()
704 static void force_unoptimize_kprobe(struct optimized_kprobe *op) in force_unoptimize_kprobe() argument
707 arch_unoptimize_kprobe(op); in force_unoptimize_kprobe()
708 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; in force_unoptimize_kprobe()
714 struct optimized_kprobe *op; in unoptimize_kprobe() local
719 op = container_of(p, struct optimized_kprobe, kp); in unoptimize_kprobe()
723 if (!list_empty(&op->list)) { in unoptimize_kprobe()
724 if (optprobe_queued_unopt(op)) { in unoptimize_kprobe()
731 force_unoptimize_kprobe(op); in unoptimize_kprobe()
732 list_move(&op->list, &freeing_list); in unoptimize_kprobe()
736 list_del_init(&op->list); in unoptimize_kprobe()
737 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; in unoptimize_kprobe()
745 force_unoptimize_kprobe(op); in unoptimize_kprobe()
747 list_add(&op->list, &unoptimizing_list); in unoptimize_kprobe()
755 struct optimized_kprobe *op; in reuse_unused_kprobe() local
761 op = container_of(ap, struct optimized_kprobe, kp); in reuse_unused_kprobe()
762 WARN_ON_ONCE(list_empty(&op->list)); in reuse_unused_kprobe()
776 struct optimized_kprobe *op; in kill_optimized_kprobe() local
778 op = container_of(p, struct optimized_kprobe, kp); in kill_optimized_kprobe()
779 if (!list_empty(&op->list)) in kill_optimized_kprobe()
781 list_del_init(&op->list); in kill_optimized_kprobe()
782 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; in kill_optimized_kprobe()
790 if (optprobe_queued_unopt(op)) in kill_optimized_kprobe()
791 list_move(&op->list, &freeing_list); in kill_optimized_kprobe()
795 arch_remove_optimized_kprobe(op); in kill_optimized_kprobe()
799 void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p) in __prepare_optimized_kprobe() argument
802 arch_prepare_optimized_kprobe(op, p); in __prepare_optimized_kprobe()
808 struct optimized_kprobe *op; in prepare_optimized_kprobe() local
810 op = container_of(p, struct optimized_kprobe, kp); in prepare_optimized_kprobe()
811 __prepare_optimized_kprobe(op, p); in prepare_optimized_kprobe()
817 struct optimized_kprobe *op; in alloc_aggr_kprobe() local
819 op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL); in alloc_aggr_kprobe()
820 if (!op) in alloc_aggr_kprobe()
823 INIT_LIST_HEAD(&op->list); in alloc_aggr_kprobe()
824 op->kp.addr = p->addr; in alloc_aggr_kprobe()
825 __prepare_optimized_kprobe(op, p); in alloc_aggr_kprobe()
827 return &op->kp; in alloc_aggr_kprobe()
839 struct optimized_kprobe *op; in try_to_optimize_kprobe() local
854 op = container_of(ap, struct optimized_kprobe, kp); in try_to_optimize_kprobe()
855 if (!arch_prepared_optinsn(&op->optinsn)) { in try_to_optimize_kprobe()
857 arch_remove_optimized_kprobe(op); in try_to_optimize_kprobe()
858 kfree(op); in try_to_optimize_kprobe()
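
Nearly every match above repeats the same idiom: recovering the enclosing struct optimized_kprobe from a plain struct kprobe pointer with container_of(p, struct optimized_kprobe, kp). The stand-alone sketch below shows how that step works in isolation. The struct shapes and the main() driver are simplified stand-ins introduced only for illustration; the real kernel declarations and the type-checking version of container_of live in the kernel headers.

#include <stddef.h>
#include <stdio.h>

/* Simplified container_of: the kernel macro adds type checking on top of this. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Cut-down stand-ins for the kernel structures (assumed shapes, not the real ones). */
struct kprobe {
        void *addr;
        unsigned long flags;
};

struct optimized_kprobe {
        struct kprobe kp;   /* embedded kprobe; callers pass around &op->kp */
        int optinsn;        /* placeholder for the real op->optinsn payload */
};

int main(void)
{
        struct optimized_kprobe op = { .kp = { .addr = (void *)0x1000 } };
        struct kprobe *p = &op.kp;

        /*
         * Same step as "op = container_of(p, struct optimized_kprobe, kp)"
         * in the listing: subtract the offset of the kp member to get back
         * to the outer structure that embeds it.
         */
        struct optimized_kprobe *outer =
                container_of(p, struct optimized_kprobe, kp);

        printf("outer=%p  &op=%p  (should match)\n", (void *)outer, (void *)&op);
        return 0;
}

Because the optimized probe always embeds its struct kprobe as the kp member, every helper in the listing can accept the generic struct kprobe * and step back out to the optimized_kprobe that owns it before touching op->list, op->optinsn, or the KPROBE_FLAG_OPTIMIZED bit.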