/kernel/trace/

trace_probe.c
  281  code->op = FETCH_OP_RETVAL;   in parse_probe_vars()
  288  code->op = FETCH_OP_STACKP;   in parse_probe_vars()
  298  code->op = FETCH_OP_STACK;   in parse_probe_vars()
  304  code->op = FETCH_OP_COMM;   in parse_probe_vars()
  316  code->op = FETCH_OP_ARG;   in parse_probe_vars()
  373  code->op = FETCH_OP_REG;   in parse_probe_arg()
  388  code->op = FETCH_OP_IMM;   in parse_probe_arg()
  402  code->op = FETCH_OP_FOFFS;   in parse_probe_arg()
  411  code->op = FETCH_NOP_SYMBOL;   in parse_probe_arg()
  419  code->op = FETCH_OP_IMM;   in parse_probe_arg()
  [all …]

trace_probe_tmpl.h
   83  if (code->op == FETCH_OP_DEREF) {   in process_fetch_insn_bottom()
   87  } else if (code->op == FETCH_OP_UDEREF) {   in process_fetch_insn_bottom()
  102  if (code->op == FETCH_OP_ST_STRING) {   in process_fetch_insn_bottom()
  106  } else if (code->op == FETCH_OP_ST_USTRING) {   in process_fetch_insn_bottom()
  114  switch (code->op) {   in process_fetch_insn_bottom()
  138  if (code->op == FETCH_OP_MOD_BF) {   in process_fetch_insn_bottom()
  145  if (code->op == FETCH_OP_LP_ARRAY) {   in process_fetch_insn_bottom()
  151  if (s3->op != FETCH_OP_ST_STRING &&   in process_fetch_insn_bottom()
  152      s3->op != FETCH_OP_ST_USTRING) {   in process_fetch_insn_bottom()
  169  return code->op == FETCH_OP_END ? ret : -EILSEQ;   in process_fetch_insn_bottom()

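The two trace_probe listings are two halves of one mechanism: parse_probe_vars()/parse_probe_arg() compile a probe argument string into an array of fetch instructions by assigning code->op, and process_fetch_insn_bottom() later interprets that array by dispatching on the same field, treating anything that does not finish on FETCH_OP_END as -EILSEQ. Below is a minimal, self-contained sketch of that compile-then-interpret shape; the opcode names, struct layout, and semantics are invented for illustration and are far simpler than the kernel's fetch-insn format.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical opcodes; the real FETCH_OP_* set is far richer. */
enum fetch_op { OP_IMM, OP_ADD, OP_END };

struct fetch_insn {
        enum fetch_op op;
        long operand;
};

/* Interpret the program; a well-formed one terminates with OP_END. */
static int run_fetch(const struct fetch_insn *code, size_t n, long *val)
{
        for (size_t i = 0; i < n; i++) {
                switch (code[i].op) {
                case OP_IMM:
                        *val = code[i].operand;   /* load an immediate */
                        break;
                case OP_ADD:
                        *val += code[i].operand;  /* stand-in for a deref/modify step */
                        break;
                case OP_END:
                        return 0;                 /* clean termination */
                }
        }
        return -EILSEQ;                           /* ran off the end of the program */
}

int main(void)
{
        struct fetch_insn prog[] = { { OP_IMM, 40 }, { OP_ADD, 2 }, { OP_END, 0 } };
        long v = 0;

        printf("ret=%d val=%ld\n", run_fetch(prog, 3, &v), v);
        return 0;
}

The useful property of the split is that the parser and the interpreter only share the opcode enum, so new argument syntax only has to touch the parsing side.
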
ftrace.c
  124  struct ftrace_ops *op, struct pt_regs *regs);
  143  struct ftrace_ops *op, struct pt_regs *regs)   in ftrace_pid_func()  (argument)
  145  struct trace_array *tr = op->private;   in ftrace_pid_func()
  157  op->saved_func(ip, parent_ip, op, regs);   in ftrace_pid_func()
  367  struct ftrace_ops *op;   in ftrace_update_pid_func()  (local)
  373  do_for_each_ftrace_op(op, ftrace_ops_list) {   in ftrace_update_pid_func()
  374  if (op->flags & FTRACE_OPS_FL_PID) {   in ftrace_update_pid_func()
  375  op->func = ftrace_pids_enabled(op) ?   in ftrace_update_pid_func()
  376  ftrace_pid_func : op->saved_func;   in ftrace_update_pid_func()
  377  ftrace_update_trampoline(op);   in ftrace_update_pid_func()
  [all …]

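The ftrace.c hits show two related idioms: a callback that recovers its per-instance state from the ftrace_ops it was registered with (op->private), and ftrace_update_pid_func() walking the ops list to flip op->func between a pid-filtering wrapper and the original op->saved_func. Here is a rough, self-contained sketch of that pointer-swapping idiom with invented types and a plain singly linked list; it is not the kernel's ftrace_ops machinery.

#include <stdbool.h>
#include <stdio.h>

struct my_ops;
typedef void (*my_func_t)(unsigned long ip, struct my_ops *op);

#define FL_PID 0x1

struct my_ops {
        my_func_t func;        /* what the trampoline currently calls */
        my_func_t saved_func;  /* the original callback */
        unsigned long flags;
        void *private;
        struct my_ops *next;
};

static void real_cb(unsigned long ip, struct my_ops *op)
{
        printf("real_cb: ip=%lx priv=%p\n", ip, op->private);
}

/* Wrapper: filter first, then forward to the saved callback. */
static void pid_filter_cb(unsigned long ip, struct my_ops *op)
{
        /* a real filter would check the current task against op->private here */
        op->saved_func(ip, op);
}

/* Flip every PID-aware op between the wrapper and its saved callback. */
static void update_pid_func(struct my_ops *list, bool pids_enabled)
{
        struct my_ops *op;

        for (op = list; op; op = op->next) {
                if (op->flags & FL_PID)
                        op->func = pids_enabled ? pid_filter_cb : op->saved_func;
        }
}

int main(void)
{
        struct my_ops a = { real_cb, real_cb, FL_PID, NULL, NULL };

        update_pid_func(&a, true);
        a.func(0x1234, &a);
        return 0;
}

Keeping saved_func next to func is what makes the switch reversible: the wrapper can be dropped again without having to remember elsewhere what it replaced.
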
trace_events_filter.c
   788  switch (pred->op) {   in filter_pred_cpu()
   939  if (pred->op == OP_GLOB) {   in filter_build_regex()
  1166  static filter_pred_fn_t select_comparison_fn(enum filter_op_ids op,   in select_comparison_fn()  (argument)
  1172  switch (op) {   in select_comparison_fn()
  1177  if (WARN_ON_ONCE(op < PRED_FUNC_START))   in select_comparison_fn()
  1179  pred_func_index = op - PRED_FUNC_START;   in select_comparison_fn()
  1237  int op;   in parse_pred()  (local)
  1277  for (op = 0; ops[op]; op++) {   in parse_pred()
  1279  if (strncmp(str + i, ops[op], strlen(ops[op])) == 0)   in parse_pred()
  1283  if (!ops[op]) {   in parse_pred()
  [all …]

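In parse_pred() the comparison operator is found by scanning a NULL-terminated table of operator strings with strncmp() until one matches at the current position; running off the end of the table (ops[op] == NULL) means "no operator here". A standalone sketch of that table-scan idiom, with an invented operator set:

#include <stdio.h>
#include <string.h>

/* Hypothetical operator table; the index doubles as the operator id. */
static const char *ops[] = { "<=", ">=", "!=", "==", "<", ">", NULL };

/* Return the operator index found at str[i], or -1 if none matches. */
static int match_op(const char *str, size_t i)
{
        int op;

        for (op = 0; ops[op]; op++) {
                if (strncmp(str + i, ops[op], strlen(ops[op])) == 0)
                        return op;
        }
        return -1;
}

int main(void)
{
        const char *pred = "pid != 42";

        printf("op index: %d\n", match_op(pred, 4));   /* matches "!=" */
        return 0;
}

One detail worth copying in this sketch: two-character operators are listed before their one-character prefixes so that "<=" is never mis-parsed as "<".
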
fgraph.c
  463  struct ftrace_ops *op;   in update_function_graph_func()  (local)
  472  do_for_each_ftrace_op(op, ftrace_ops_list) {   in update_function_graph_func()
  473  if (op != &global_ops && op != &graph_ops &&   in update_function_graph_func()
  474  op != &ftrace_list_end) {   in update_function_graph_func()
  479  } while_for_each_ftrace_op(op);   in update_function_graph_func()

trace_selftest.c
  109  struct ftrace_ops *op,   in trace_selftest_test_probe1_func()  (argument)
  118  struct ftrace_ops *op,   in trace_selftest_test_probe2_func()  (argument)
  127  struct ftrace_ops *op,   in trace_selftest_test_probe3_func()  (argument)
  136  struct ftrace_ops *op,   in trace_selftest_test_global_func()  (argument)
  145  struct ftrace_ops *op,   in trace_selftest_test_dyn_func()  (argument)
  419  struct ftrace_ops *op,   in trace_selftest_test_recursion_func()  (argument)
  434  struct ftrace_ops *op,   in trace_selftest_test_recursion_safe_func()  (argument)
  553  struct ftrace_ops *op,   in trace_selftest_test_regs_func()  (argument)

trace_functions.c
   26  struct ftrace_ops *op, struct pt_regs *pt_regs);
   29  struct ftrace_ops *op, struct pt_regs *pt_regs);
  132  struct ftrace_ops *op, struct pt_regs *pt_regs)   in function_trace_call()  (argument)
  134  struct trace_array *tr = op->private;   in function_trace_call()
  183  struct ftrace_ops *op, struct pt_regs *pt_regs)   in function_stack_trace_call()  (argument)
  185  struct trace_array *tr = op->private;   in function_stack_trace_call()

blktrace.c
   216  int op, int op_flags, u32 what, int error, int pdu_len,   in __blk_add_trace()  (argument)
   233  what |= ddir_act[op_is_write(op) ? WRITE : READ];   in __blk_add_trace()
   239  if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)   in __blk_add_trace()
   241  if (op == REQ_OP_FLUSH)   in __blk_add_trace()
  1968  void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes)   in blk_fill_rwbs()  (argument)
  1972  if (op & REQ_PREFLUSH)   in blk_fill_rwbs()
  1975  switch (op & REQ_OP_MASK) {   in blk_fill_rwbs()
  1997  if (op & REQ_FUA)   in blk_fill_rwbs()
  1999  if (op & REQ_RAHEAD)   in blk_fill_rwbs()
  2001  if (op & REQ_SYNC)   in blk_fill_rwbs()
  [all …]

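blk_fill_rwbs() turns a request's op/flags word into the short "rwbs" string that shows up in blktrace output, testing one bit or op code at a time and appending a letter per attribute. The sketch below imitates that decoder with made-up flag bits; the real REQ_* and REQ_OP_* values live in include/linux/blk_types.h and are not these.

#include <stdio.h>

/* Stand-in flag bits -- NOT the real REQ_* values. */
#define MY_WRITE     (1u << 0)
#define MY_DISCARD   (1u << 1)
#define MY_PREFLUSH  (1u << 2)
#define MY_FUA       (1u << 3)
#define MY_SYNC      (1u << 4)

/* Build a short classification string, one character per attribute. */
static void fill_rwbs(char *rwbs, unsigned int op)
{
        int i = 0;

        if (op & MY_PREFLUSH)
                rwbs[i++] = 'F';

        if (op & MY_DISCARD)
                rwbs[i++] = 'D';
        else if (op & MY_WRITE)
                rwbs[i++] = 'W';
        else
                rwbs[i++] = 'R';

        if (op & MY_FUA)
                rwbs[i++] = 'F';
        if (op & MY_SYNC)
                rwbs[i++] = 'S';

        rwbs[i] = '\0';
}

int main(void)
{
        char buf[8];

        fill_rwbs(buf, MY_WRITE | MY_SYNC);
        printf("%s\n", buf);   /* prints "WS" */
        return 0;
}
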
trace_stack.c
  293  struct ftrace_ops *op, struct pt_regs *pt_regs)   in stack_trace_call()  (argument)

bpf_trace.c
  2200  static int bpf_event_notify(struct notifier_block *nb, unsigned long op,   in bpf_event_notify()  (argument)
  2208  (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))   in bpf_event_notify()
  2213  switch (op) {   in bpf_event_notify()

trace_probe.h
  110  enum fetch_op op;  (member)

trace_irqsoff.c
  141  struct ftrace_ops *op, struct pt_regs *pt_regs)   in irqsoff_tracer_call()  (argument)

/kernel/

kprobes.c
  429  struct optimized_kprobe *op;   in free_aggr_kprobe()  (local)
  431  op = container_of(p, struct optimized_kprobe, kp);   in free_aggr_kprobe()
  432  arch_remove_optimized_kprobe(op);   in free_aggr_kprobe()
  434  kfree(op);   in free_aggr_kprobe()
  440  struct optimized_kprobe *op;   in kprobe_optready()  (local)
  443  op = container_of(p, struct optimized_kprobe, kp);   in kprobe_optready()
  444  return arch_prepared_optinsn(&op->optinsn);   in kprobe_optready()
  453  struct optimized_kprobe *op;   in kprobe_disarmed()  (local)
  459  op = container_of(p, struct optimized_kprobe, kp);   in kprobe_disarmed()
  461  return kprobe_disabled(p) && list_empty(&op->list);   in kprobe_disarmed()
  [all …]

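Almost every kprobes.c hit is the same move: a struct kprobe is embedded inside a struct optimized_kprobe, and container_of() recovers the outer object from the inner pointer so the optimizer can reach its extra fields (optinsn, list). A standalone illustration of that embedding pattern; container_of is defined locally here as the bare pointer arithmetic, without the type-checking the kernel version adds.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct probe {               /* plays the role of struct kprobe */
        unsigned long addr;
};

struct optimized_probe {     /* plays the role of struct optimized_kprobe */
        struct probe kp;     /* embedded base object */
        int opt_ready;
};

/* Given only the embedded base pointer, recover the containing object. */
static int probe_optready(struct probe *p)
{
        struct optimized_probe *op = container_of(p, struct optimized_probe, kp);

        return op->opt_ready;
}

int main(void)
{
        struct optimized_probe op = { { 0x1000 }, 1 };

        printf("optready=%d\n", probe_optready(&op.kp));
        return 0;
}
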
auditsc.c
  135  enum audit_nfcfgop op;  (member)
  344  rc = audit_uid_comparator(uid, f->op, name->uid);   in audit_compare_uid()
  351  rc = audit_uid_comparator(uid, f->op, n->uid);   in audit_compare_uid()
  368  rc = audit_gid_comparator(gid, f->op, name->gid);   in audit_compare_gid()
  375  rc = audit_gid_comparator(gid, f->op, n->gid);   in audit_compare_gid()
  411  return audit_uid_comparator(cred->uid, f->op,   in audit_field_compare()
  414  return audit_uid_comparator(cred->uid, f->op, cred->euid);   in audit_field_compare()
  416  return audit_uid_comparator(cred->uid, f->op, cred->suid);   in audit_field_compare()
  418  return audit_uid_comparator(cred->uid, f->op, cred->fsuid);   in audit_field_compare()
  421  return audit_uid_comparator(audit_get_loginuid(tsk), f->op,   in audit_field_compare()
  [all …]

auditfilter.c
  156  (f->op != Audit_equal && f->op != Audit_not_equal))   in audit_to_inode()
  314  static u32 audit_to_op(u32 op)   in audit_to_op()  (argument)
  317  for (n = Audit_equal; n < Audit_bad && audit_ops[n] != op; n++)   in audit_to_op()
  383  if (f->op == Audit_bitmask || f->op == Audit_bittest)   in audit_field_valid()
  403  if (f->op != Audit_not_equal && f->op != Audit_equal)   in audit_field_valid()
  463  f->op = audit_to_op(data->fieldflags[i]);   in audit_data_to_entry()
  464  if (f->op == Audit_bad)   in audit_data_to_entry()
  523  err = security_audit_rule_init(f->type, f->op, str,   in audit_data_to_entry()
  540  err = audit_to_watch(&entry->rule, str, f_val, f->op);   in audit_data_to_entry()
  553  err = audit_make_tree(&entry->rule, str, f->op);   in audit_data_to_entry()
  [all …]

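audit_to_op() converts the raw operator bits received from userspace into an enum by scanning a small table and returning the Audit_bad sentinel when nothing matches, which audit_data_to_entry() then treats as a reason to reject the rule. A compact sketch of that validate-by-table idiom with an invented operator set and wire encoding:

#include <stdint.h>
#include <stdio.h>

enum my_op { Op_equal, Op_not_equal, Op_lt, Op_gt, Op_bad };

/* Wire encoding for each operator, indexed by enum my_op. */
static const uint32_t op_codes[Op_bad] = { 0x10, 0x20, 0x01, 0x02 };

/* Map a raw code to the enum; Op_bad means "reject this rule". */
static enum my_op to_op(uint32_t code)
{
        enum my_op n;

        for (n = Op_equal; n < Op_bad && op_codes[n] != code; n++)
                ;
        return n;
}

int main(void)
{
        printf("%d %d\n", to_op(0x20), to_op(0xff));  /* 1 (Op_not_equal), 4 (Op_bad) */
        return 0;
}
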
audit.h
  221  extern int audit_comparator(const u32 left, const u32 op, const u32 right);
  222  extern int audit_uid_comparator(kuid_t left, u32 op, kuid_t right);
  223  extern int audit_gid_comparator(kgid_t left, u32 op, kgid_t right);
  262  u32 op);
  284  extern int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op);
  317  #define audit_make_tree(rule, str, op) -EINVAL  (argument)

audit_watch.c
  178  int audit_to_watch(struct audit_krule *krule, char *path, int len, u32 op)   in audit_to_watch()  (argument)
  187  op != Audit_equal ||   in audit_to_watch()
  226  static void audit_watch_log_rule_change(struct audit_krule *r, struct audit_watch *w, char *op)   in audit_watch_log_rule_change()  (argument)
  236  audit_log_format(ab, "op=%s path=", op);   in audit_watch_log_rule_change()

audit_fsnotify.c
  115  static void audit_mark_log_rule_change(struct audit_fsnotify_mark *audit_mark, char *op)   in audit_mark_log_rule_change()  (argument)
  126  audit_log_format(ab, " op=%s path=", op);   in audit_mark_log_rule_change()

seccomp.c
  1924  static long do_seccomp(unsigned int op, unsigned int flags,   in do_seccomp()  (argument)
  1927  switch (op) {   in do_seccomp()
  1949  SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,   in SYSCALL_DEFINE3()  (argument)
  1952  return do_seccomp(op, flags, uargs);   in SYSCALL_DEFINE3()
  1964  unsigned int op;   in prctl_set_seccomp()  (local)
  1969  op = SECCOMP_SET_MODE_STRICT;   in prctl_set_seccomp()
  1978  op = SECCOMP_SET_MODE_FILTER;   in prctl_set_seccomp()
  1986  return do_seccomp(op, 0, uargs);   in prctl_set_seccomp()

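The seccomp.c hits show the two entry points funnelling into one dispatcher: the seccomp(2) syscall hands its op straight to do_seccomp(), while the legacy prctl_set_seccomp() path first translates its old mode number into the equivalent op. A minimal sketch of that translate-then-dispatch shape, using stand-in constants rather than the real SECCOMP_SET_MODE_* values:

#include <errno.h>
#include <stdio.h>

/* Stand-ins for the real operation constants. */
enum { SET_MODE_STRICT = 0, SET_MODE_FILTER = 1 };

static long do_op(unsigned int op, unsigned int flags, const void *uargs)
{
        switch (op) {
        case SET_MODE_STRICT:
                return flags ? -EINVAL : 0;   /* strict mode takes no flags */
        case SET_MODE_FILTER:
                return uargs ? 0 : -EFAULT;   /* filter mode needs a program */
        default:
                return -EINVAL;
        }
}

/* Legacy front end: translate the old mode numbers, then dispatch. */
static long legacy_set(int mode, const void *filter)
{
        unsigned int op;

        switch (mode) {
        case 1:  op = SET_MODE_STRICT; filter = NULL; break;
        case 2:  op = SET_MODE_FILTER;                break;
        default: return -EINVAL;
        }
        return do_op(op, 0, filter);
}

int main(void)
{
        printf("%ld %ld\n", legacy_set(1, NULL), legacy_set(3, NULL));
        return 0;
}
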
ptrace.c
  942  info->op = PTRACE_SYSCALL_INFO_ENTRY;   in ptrace_get_syscall_info_entry()
  964  info->op = PTRACE_SYSCALL_INFO_SECCOMP;   in ptrace_get_syscall_info_seccomp()
  975  info->op = PTRACE_SYSCALL_INFO_EXIT;   in ptrace_get_syscall_info_exit()
  991  .op = PTRACE_SYSCALL_INFO_NONE,   in ptrace_get_syscall_info()

audit_tree.c
  726  int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)   in audit_make_tree()  (argument)
  731  op != Audit_equal ||   in audit_make_tree()

/kernel/locking/

spinlock.c
   67  #define BUILD_LOCK_OPS(op, locktype) \  (argument)
   68  void __lockfunc __raw_##op##_lock(locktype##_t *lock) \
   72  if (likely(do_raw_##op##_trylock(lock))) \
   76  arch_##op##_relax(&lock->raw_lock); \
   80  unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
   87  if (likely(do_raw_##op##_trylock(lock))) \
   92  arch_##op##_relax(&lock->raw_lock); \
   98  void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock) \
  100  _raw_##op##_lock_irqsave(lock); \
  103  void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \
  [all …]

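spinlock.c builds its __raw_spin/read/write lock families from a single template: BUILD_LOCK_OPS(op, locktype) pastes the op name into the function name and into the helper calls with ##, so one macro body yields the spin, read and write variants. A toy, self-contained version of the same token-pasting trick, with a fake lock type, fake helpers and one instantiation:

#include <stdbool.h>
#include <stdio.h>

typedef struct { volatile int taken; } toy_lock_t;

/* Primitive helpers the template expects to exist for each "op". */
static bool do_toy_spin_trylock(toy_lock_t *l)
{
        if (l->taken)
                return false;
        l->taken = 1;
        return true;
}

static void toy_spin_relax(toy_lock_t *l) { (void)l; /* cpu_relax() stand-in */ }

/* The template: one macro stamps out a try-then-relax lock function
 * whose name and helper calls are built by pasting in "op". */
#define BUILD_TOY_LOCK_OPS(op, locktype)                         \
        void toy_##op##_lock(locktype##_t *lock)                 \
        {                                                        \
                for (;;) {                                       \
                        if (do_toy_##op##_trylock(lock))         \
                                break;                           \
                        toy_##op##_relax(lock);                  \
                }                                                \
        }

BUILD_TOY_LOCK_OPS(spin, toy_lock)   /* defines toy_spin_lock() */

int main(void)
{
        toy_lock_t l = { 0 };

        toy_spin_lock(&l);
        printf("taken=%d\n", l.taken);
        return 0;
}

The real template goes further, as the listed line numbers show: the same body also stamps out _irqsave, _irq and _bh variants for each lock type.
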
/kernel/bpf/

bpf_iter.c
  113  p = seq->op->start(seq, &seq->index);   in bpf_seq_read()
  118  seq->op->stop(seq, p);   in bpf_seq_read()
  123  err = seq->op->show(seq, p);   in bpf_seq_read()
  133  seq->op->stop(seq, p);   in bpf_seq_read()
  143  p = seq->op->next(seq, p, &seq->index);   in bpf_seq_read()
  147  seq->op->next);   in bpf_seq_read()
  163  seq->op->stop(seq, p);   in bpf_seq_read()
  169  err = seq->op->show(seq, p);   in bpf_seq_read()
  178  seq->op->stop(seq, p);   in bpf_seq_read()
  187  seq->op->stop(seq, p);   in bpf_seq_read()

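bpf_seq_read() drives everything through a seq_operations-style vtable: start() yields the first element, show() formats it, next() advances, and every exit path ends in stop(). Below is a stripped-down sketch of that start/show/next/stop protocol over a plain array, with hypothetical types rather than the kernel's seq_file API:

#include <stddef.h>
#include <stdio.h>

struct seq;

struct seq_ops {
        void *(*start)(struct seq *s, size_t *pos);
        void *(*next)(struct seq *s, void *v, size_t *pos);
        int   (*show)(struct seq *s, void *v);
        void  (*stop)(struct seq *s, void *v);
};

struct seq {
        const struct seq_ops *op;
        size_t index;
        int *data;
        size_t len;
};

static void *arr_start(struct seq *s, size_t *pos) { return *pos < s->len ? &s->data[*pos] : NULL; }
static void *arr_next(struct seq *s, void *v, size_t *pos) { (void)v; ++*pos; return arr_start(s, pos); }
static int   arr_show(struct seq *s, void *v) { (void)s; printf("%d\n", *(int *)v); return 0; }
static void  arr_stop(struct seq *s, void *v) { (void)s; (void)v; /* release resources here */ }

static const struct seq_ops arr_ops = { arr_start, arr_next, arr_show, arr_stop };

/* The driver loop: every exit path goes through op->stop(). */
static int seq_read_all(struct seq *s)
{
        void *p = s->op->start(s, &s->index);

        while (p) {
                int err = s->op->show(s, p);

                if (err) {
                        s->op->stop(s, p);
                        return err;
                }
                p = s->op->next(s, p, &s->index);
        }
        s->op->stop(s, NULL);
        return 0;
}

int main(void)
{
        int vals[] = { 1, 2, 3 };
        struct seq s = { &arr_ops, 0, vals, 3 };

        return seq_read_all(&s);
}
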
/kernel/futex/

core.c
  1652  unsigned int op = (encoded_op & 0x70000000) >> 28;   in futex_atomic_op_inuser()  (local)
  1673  ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);   in futex_atomic_op_inuser()
  1702  int nr_wake, int nr_wake2, int op)   in futex_wake_op()  (argument)
  1723  op_ret = futex_atomic_op_inuser(op, uaddr2);   in futex_wake_op()
  3735  long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,   in do_futex()  (argument)
  3738  int cmd = op & FUTEX_CMD_MASK;   in do_futex()
  3741  if (!(op & FUTEX_PRIVATE_FLAG))   in do_futex()
  3744  if (op & FUTEX_CLOCK_REALTIME) {   in do_futex()
  3795  SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,   in SYSCALL_DEFINE6()  (argument)
  3802  int cmd = op & FUTEX_CMD_MASK;   in SYSCALL_DEFINE6()
  [all …]

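futex_atomic_op_inuser() starts by unpacking the 32-bit encoded_op word; the listing shows the operation number coming from bits 28-30 via (encoded_op & 0x70000000) >> 28, with bit 31 reserved as the "operand is a shift count" flag. The sketch below decodes all four fields on the assumption that they are packed the way the FUTEX_OP() macro lays them out (op in bits 31-28, cmp in 27-24, oparg in 23-12, cmparg in 11-0); verify against include/uapi/linux/futex.h before relying on it.

#include <stdint.h>
#include <stdio.h>

/* Sign-extend the low (bits+1) bits of v; bit index `bits` is the sign bit. */
static int32_t sign_extend32(uint32_t v, int bits)
{
        int shift = 31 - bits;

        return (int32_t)(v << shift) >> shift;
}

struct futex_op {
        unsigned int op;     /* operation: set/add/or/andn/xor */
        unsigned int cmp;    /* comparison applied to the old value */
        int oparg;           /* operand, 12-bit sign-extended */
        int cmparg;          /* comparison argument, 12-bit sign-extended */
};

/* Assumed field layout: op:31-28, cmp:27-24, oparg:23-12, cmparg:11-0. */
static struct futex_op decode(uint32_t encoded_op)
{
        struct futex_op d;

        d.op     = (encoded_op & 0x70000000) >> 28;
        d.cmp    = (encoded_op & 0x0f000000) >> 24;
        d.oparg  = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
        d.cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
        return d;
}

int main(void)
{
        struct futex_op d = decode(0x01005001u);      /* arbitrary example word */

        printf("op=%u cmp=%u oparg=%d cmparg=%d\n", d.op, d.cmp, d.oparg, d.cmparg);
        return 0;
}
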
/kernel/kcsan/

core.c
  961  #define DEFINE_TSAN_ATOMIC_RMW(op, bits, suffix) \  (argument)
  962  u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder); \
  963  u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder) \
  970  return __atomic_##op##suffix(ptr, v, memorder); \
  972  EXPORT_SYMBOL(__tsan_atomic##bits##_##op)

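The kcsan entry uses the same generate-by-macro trick as spinlock.c, this time stamping out __tsan_atomicN_<op> instrumentation wrappers that forward to the compiler's __atomic_<op> builtins. Here is a reduced sketch that drops the suffix parameter and generates two fetch-style wrappers over the real __atomic_fetch_add/__atomic_fetch_or builtins (GCC/Clang); the my_atomic* names are invented.

#include <stdint.h>
#include <stdio.h>

/* Stamp out a wrapper for one (op, width) pair; the names are pasted together. */
#define DEFINE_MY_ATOMIC_RMW(op, bits)                                        \
        static uint##bits##_t my_atomic##bits##_##op(uint##bits##_t *ptr,     \
                                                     uint##bits##_t v)        \
        {                                                                     \
                /* forward to the matching compiler builtin */                \
                return __atomic_##op(ptr, v, __ATOMIC_SEQ_CST);               \
        }

DEFINE_MY_ATOMIC_RMW(fetch_add, 32)   /* defines my_atomic32_fetch_add() */
DEFINE_MY_ATOMIC_RMW(fetch_or,  64)   /* defines my_atomic64_fetch_or()  */

int main(void)
{
        uint32_t a = 40;
        uint64_t b = 1;

        printf("%u %llu\n", (unsigned)my_atomic32_fetch_add(&a, 2),
               (unsigned long long)my_atomic64_fetch_or(&b, 2));
        printf("%u %llu\n", (unsigned)a, (unsigned long long)b);
        return 0;
}
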
|