/kernel/trace/ |
D | trace_probe.c |
    294  code->op = FETCH_OP_TP_ARG;   in parse_probe_vars()
    297  code->op = FETCH_OP_RETVAL;   in parse_probe_vars()
    304  code->op = FETCH_OP_STACKP;   in parse_probe_vars()
    314  code->op = FETCH_OP_STACK;   in parse_probe_vars()
    320  code->op = FETCH_OP_COMM;   in parse_probe_vars()
    332  code->op = FETCH_OP_ARG;   in parse_probe_vars()
    396  code->op = FETCH_OP_REG;   in parse_probe_arg()
    411  code->op = FETCH_OP_IMM;   in parse_probe_arg()
    425  code->op = FETCH_OP_FOFFS;   in parse_probe_arg()
    434  code->op = FETCH_NOP_SYMBOL;   in parse_probe_arg()
    [all …]
|
D | trace_probe_tmpl.h |
    114  if (code->op == FETCH_OP_DEREF) {   in process_fetch_insn_bottom()
    118  } else if (code->op == FETCH_OP_UDEREF) {   in process_fetch_insn_bottom()
    133  switch (code->op) {   in process_fetch_insn_bottom()
    151  switch (code->op) {   in process_fetch_insn_bottom()
    179  if (code->op == FETCH_OP_MOD_BF) {   in process_fetch_insn_bottom()
    186  if (code->op == FETCH_OP_LP_ARRAY) {   in process_fetch_insn_bottom()
    192  if (s3->op != FETCH_OP_ST_STRING &&   in process_fetch_insn_bottom()
    193  s3->op != FETCH_OP_ST_USTRING) {   in process_fetch_insn_bottom()
    210  return code->op == FETCH_OP_END ? ret : -EILSEQ;   in process_fetch_insn_bottom()
|
D | ftrace.c |
    126  struct ftrace_ops *op, struct ftrace_regs *fregs);
    140  struct ftrace_ops *op, struct ftrace_regs *fregs)   in ftrace_pid_func() argument
    142  struct trace_array *tr = op->private;   in ftrace_pid_func()
    154  op->saved_func(ip, parent_ip, op, fregs);   in ftrace_pid_func()
    364  struct ftrace_ops *op;   in ftrace_update_pid_func() local
    370  do_for_each_ftrace_op(op, ftrace_ops_list) {   in ftrace_update_pid_func()
    371  if (op->flags & FTRACE_OPS_FL_PID) {   in ftrace_update_pid_func()
    372  op->func = ftrace_pids_enabled(op) ?   in ftrace_update_pid_func()
    373  ftrace_pid_func : op->saved_func;   in ftrace_update_pid_func()
    374  ftrace_update_trampoline(op);   in ftrace_update_pid_func()
    [all …]
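Illustrative aside, not part of the index: every ftrace.c hit above is built around the common ftrace callback signature, where the third argument is the struct ftrace_ops that registered the callback and op->private carries per-instance data (in the listing, a trace_array). A minimal sketch along those lines follows; the state structure and all demo_* names are invented for the sketch, only the signature and the .func/.private fields come from the listing, and a real callback would additionally need recursion protection, which is omitted here.

    #include <linux/ftrace.h>

    /* Hypothetical per-ops state; in the listing above this slot holds a trace_array. */
    struct demo_state {
        unsigned long hits;
    };

    static struct demo_state demo_state;

    /* Same ftrace_func_t shape as the callbacks indexed above. */
    static void demo_trace_call(unsigned long ip, unsigned long parent_ip,
                                struct ftrace_ops *op, struct ftrace_regs *fregs)
    {
        struct demo_state *s = op->private;

        /* op identifies the registering ftrace_ops, so its private data travels with it. */
        s->hits++;
    }

    static struct ftrace_ops demo_ops = {
        .func    = demo_trace_call,
        .private = &demo_state,
    };

    /* register_ftrace_function(&demo_ops) / unregister_ftrace_function(&demo_ops) would arm and disarm it. */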
|
D | trace_functions.c |
    26  struct ftrace_ops *op, struct ftrace_regs *fregs);
    29  struct ftrace_ops *op, struct ftrace_regs *fregs);
    32  struct ftrace_ops *op, struct ftrace_regs *fregs);
    35  struct ftrace_ops *op,
    173  struct ftrace_ops *op, struct ftrace_regs *fregs)   in function_trace_call() argument
    175  struct trace_array *tr = op->private;   in function_trace_call()
    218  struct ftrace_ops *op, struct ftrace_regs *fregs)   in function_stack_trace_call() argument
    220  struct trace_array *tr = op->private;   in function_stack_trace_call()
    281  struct ftrace_ops *op,   in function_no_repeats_trace_call() argument
    285  struct trace_array *tr = op->private;   in function_no_repeats_trace_call()
    [all …]
|
D | trace_events_filter.c |
    79  int op;   member
    641  switch (pred->op) { \
    851  switch (pred->op) {   in filter_pred_cpu()
    997  if (pred->op == OP_GLOB) {   in filter_build_regex()
    1240  static enum filter_pred_fn select_comparison_fn(enum filter_op_ids op,   in select_comparison_fn() argument
    1246  switch (op) {   in select_comparison_fn()
    1251  if (WARN_ON_ONCE(op < PRED_FUNC_START))   in select_comparison_fn()
    1253  pred_func_index = op - PRED_FUNC_START;   in select_comparison_fn()
    1360  int op;   in parse_pred() local
    1400  for (op = 0; ops[op]; op++) {   in parse_pred()
    [all …]
|
D | fgraph.c |
    475  struct ftrace_ops *op;   in update_function_graph_func() local
    484  do_for_each_ftrace_op(op, ftrace_ops_list) {   in update_function_graph_func()
    485  if (op != &global_ops && op != &graph_ops &&   in update_function_graph_func()
    486  op != &ftrace_list_end) {   in update_function_graph_func()
    491  } while_for_each_ftrace_op(op);   in update_function_graph_func()
|
D | trace_selftest.c |
    109  struct ftrace_ops *op,   in trace_selftest_test_probe1_func() argument
    118  struct ftrace_ops *op,   in trace_selftest_test_probe2_func() argument
    127  struct ftrace_ops *op,   in trace_selftest_test_probe3_func() argument
    136  struct ftrace_ops *op,   in trace_selftest_test_global_func() argument
    145  struct ftrace_ops *op,   in trace_selftest_test_dyn_func() argument
    450  struct ftrace_ops *op,   in trace_selftest_test_recursion_func() argument
    465  struct ftrace_ops *op,   in trace_selftest_test_recursion_safe_func() argument
    584  struct ftrace_ops *op,   in trace_selftest_test_regs_func() argument
|
D | trace_eprobe.c |
    252  parg->code->op = FETCH_OP_COMM;   in trace_eprobe_tp_arg_update()
    394  switch (code->op) {   in get_eprobe_size()
    433  switch (code->op) {   in process_fetch_insn()
    909  if (ep->tp.args[i].code->op == FETCH_OP_TP_ARG) {   in trace_eprobe_tp_update_arg()
|
D | blktrace.c |
    230  const enum req_op op = opf & REQ_OP_MASK;   in __blk_add_trace() local
    235  what |= ddir_act[op_is_write(op) ? WRITE : READ];   in __blk_add_trace()
    241  if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)   in __blk_add_trace()
    243  if (op == REQ_OP_FLUSH)   in __blk_add_trace()
|
/kernel/ |
D | kprobes.c |
    439  struct optimized_kprobe *op;   in free_aggr_kprobe() local
    441  op = container_of(p, struct optimized_kprobe, kp);   in free_aggr_kprobe()
    442  arch_remove_optimized_kprobe(op);   in free_aggr_kprobe()
    444  kfree(op);   in free_aggr_kprobe()
    450  struct optimized_kprobe *op;   in kprobe_optready() local
    453  op = container_of(p, struct optimized_kprobe, kp);   in kprobe_optready()
    454  return arch_prepared_optinsn(&op->optinsn);   in kprobe_optready()
    463  struct optimized_kprobe *op;   in kprobe_disarmed() local
    469  op = container_of(p, struct optimized_kprobe, kp);   in kprobe_disarmed()
    471  return kprobe_disabled(p) && list_empty(&op->list);   in kprobe_disarmed()
    [all …]
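Illustrative aside, not part of the index: the kprobes.c hits above repeatedly use the container_of() idiom to step from the embedded struct kprobe (kp) back to the enclosing struct optimized_kprobe. A generic sketch of that idiom, with structure names invented for the example, is:

    #include <linux/container_of.h>    /* container_of(); via <linux/kernel.h> on older trees */

    struct inner {
        int id;
    };

    struct outer {
        unsigned long flags;
        struct inner member;    /* embedded, like kp inside optimized_kprobe */
    };

    static unsigned long outer_flags_of(struct inner *p)
    {
        /* Recover the enclosing object from a pointer to one of its members. */
        struct outer *o = container_of(p, struct outer, member);

        return o->flags;
    }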
|
D | auditsc.c |
    121  enum audit_nfcfgop op;   member
    338  rc = audit_uid_comparator(uid, f->op, name->uid);   in audit_compare_uid()
    345  rc = audit_uid_comparator(uid, f->op, n->uid);   in audit_compare_uid()
    362  rc = audit_gid_comparator(gid, f->op, name->gid);   in audit_compare_gid()
    369  rc = audit_gid_comparator(gid, f->op, n->gid);   in audit_compare_gid()
    405  return audit_uid_comparator(cred->uid, f->op,   in audit_field_compare()
    408  return audit_uid_comparator(cred->uid, f->op, cred->euid);   in audit_field_compare()
    410  return audit_uid_comparator(cred->uid, f->op, cred->suid);   in audit_field_compare()
    412  return audit_uid_comparator(cred->uid, f->op, cred->fsuid);   in audit_field_compare()
    415  return audit_uid_comparator(audit_get_loginuid(tsk), f->op,   in audit_field_compare()
    [all …]
|
D | auditfilter.c |
    159  (f->op != Audit_equal && f->op != Audit_not_equal))   in audit_to_inode()
    318  static u32 audit_to_op(u32 op)   in audit_to_op() argument
    321  for (n = Audit_equal; n < Audit_bad && audit_ops[n] != op; n++)   in audit_to_op()
    391  if (f->op == Audit_bitmask || f->op == Audit_bittest)   in audit_field_valid()
    411  if (f->op != Audit_not_equal && f->op != Audit_equal)   in audit_field_valid()
    471  f->op = audit_to_op(data->fieldflags[i]);   in audit_data_to_entry()
    472  if (f->op == Audit_bad)   in audit_data_to_entry()
    531  err = security_audit_rule_init(f->type, f->op, str,   in audit_data_to_entry()
    548  err = audit_to_watch(&entry->rule, str, f_val, f->op);   in audit_data_to_entry()
    561  err = audit_make_tree(&entry->rule, str, f->op);   in audit_data_to_entry()
    [all …]
|
D | audit.h |
    231  extern int audit_comparator(const u32 left, const u32 op, const u32 right);
    232  extern int audit_uid_comparator(kuid_t left, u32 op, kuid_t right);
    233  extern int audit_gid_comparator(kgid_t left, u32 op, kgid_t right);
    270  u32 op);
    292  extern int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op);
    325  #define audit_make_tree(rule, str, op) -EINVAL   argument
|
D | audit_watch.c |
    178  int audit_to_watch(struct audit_krule *krule, char *path, int len, u32 op)   in audit_to_watch() argument
    188  op != Audit_equal ||   in audit_to_watch()
    227  static void audit_watch_log_rule_change(struct audit_krule *r, struct audit_watch *w, char *op)   in audit_watch_log_rule_change() argument
    237  audit_log_format(ab, "op=%s path=", op);   in audit_watch_log_rule_change()
|
D | audit_fsnotify.c |
    115  static void audit_mark_log_rule_change(struct audit_fsnotify_mark *audit_mark, char *op)   in audit_mark_log_rule_change() argument
    126  audit_log_format(ab, " op=%s path=", op);   in audit_mark_log_rule_change()
|
D | seccomp.c |
    1982  static long do_seccomp(unsigned int op, unsigned int flags,   in do_seccomp() argument
    1985  switch (op) {   in do_seccomp()
    2007  SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,   in SYSCALL_DEFINE3() argument
    2010  return do_seccomp(op, flags, uargs);   in SYSCALL_DEFINE3()
    2022  unsigned int op;   in prctl_set_seccomp() local
    2027  op = SECCOMP_SET_MODE_STRICT;   in prctl_set_seccomp()
    2036  op = SECCOMP_SET_MODE_FILTER;   in prctl_set_seccomp()
    2044  return do_seccomp(op, 0, uargs);   in prctl_set_seccomp()
|
D | ptrace.c |
    943  info->op = PTRACE_SYSCALL_INFO_ENTRY;   in ptrace_get_syscall_info_entry()
    965  info->op = PTRACE_SYSCALL_INFO_SECCOMP;   in ptrace_get_syscall_info_seccomp()
    976  info->op = PTRACE_SYSCALL_INFO_EXIT;   in ptrace_get_syscall_info_exit()
    992  .op = PTRACE_SYSCALL_INFO_NONE,   in ptrace_get_syscall_info()
|
/kernel/futex/ |
D | syscalls.c |
    86  long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,   in do_futex() argument
    89  int cmd = op & FUTEX_CMD_MASK;   in do_futex()
    92  if (!(op & FUTEX_PRIVATE_FLAG))   in do_futex()
    95  if (op & FUTEX_CLOCK_REALTIME) {   in do_futex()
    153  futex_init_timeout(u32 cmd, u32 op, struct timespec64 *ts, ktime_t *t)   in futex_init_timeout() argument
    161  else if (cmd != FUTEX_LOCK_PI && !(op & FUTEX_CLOCK_REALTIME))   in futex_init_timeout()
    166  SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,   in SYSCALL_DEFINE6() argument
    170  int ret, cmd = op & FUTEX_CMD_MASK;   in SYSCALL_DEFINE6()
    175  if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))   in SYSCALL_DEFINE6()
    179  ret = futex_init_timeout(cmd, op, &ts, &t);   in SYSCALL_DEFINE6()
    [all …]
|
D | waitwake.c |
    195  unsigned int op = (encoded_op & 0x70000000) >> 28;   in futex_atomic_op_inuser() local
    216  ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);   in futex_atomic_op_inuser()
    244  int nr_wake, int nr_wake2, int op)   in futex_wake_op() argument
    265  op_ret = futex_atomic_op_inuser(op, uaddr2);   in futex_wake_op()
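Illustrative aside, not part of the index: the futex_atomic_op_inuser() hit shows FUTEX_WAKE_OP unpacking its 32-bit operand. Only the op field and its 0x70000000 mask appear in the listing; the rest of the layout below is an assumption based on the uapi FUTEX_OP() encoding and is sketched purely for orientation.

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed field layout (per the uapi FUTEX_OP() macro):
     * bit 31 = FUTEX_OP_OPARG_SHIFT, bits 28-30 = op, 24-27 = cmp,
     * 12-23 = oparg, 0-11 = cmparg.  The kernel additionally sign-extends
     * oparg and cmparg, which this toy decoder skips. */
    static void decode_futex_op(uint32_t encoded_op)
    {
        unsigned int op     = (encoded_op & 0x70000000) >> 28;  /* as in the listing */
        unsigned int cmp    = (encoded_op & 0x0f000000) >> 24;
        unsigned int oparg  = (encoded_op & 0x00fff000) >> 12;
        unsigned int cmparg =  encoded_op & 0x00000fff;

        printf("op=%u cmp=%u oparg=%u cmparg=%u\n", op, cmp, oparg, cmparg);
    }

    int main(void)
    {
        decode_futex_op(0x05000001);    /* arbitrary example value */
        return 0;
    }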
|
D | futex.h |
    288  u32 __user *uaddr2, int nr_wake, int nr_wake2, int op);
|
/kernel/locking/ |
D | spinlock.c |
    67  #define BUILD_LOCK_OPS(op, locktype) \   argument
    68  void __lockfunc __raw_##op##_lock(locktype##_t *lock) \
    72  if (likely(do_raw_##op##_trylock(lock))) \
    76  arch_##op##_relax(&lock->raw_lock); \
    80  unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
    87  if (likely(do_raw_##op##_trylock(lock))) \
    92  arch_##op##_relax(&lock->raw_lock); \
    98  void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock) \
    100  _raw_##op##_lock_irqsave(lock); \
    103  void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \
    [all …]
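Illustrative aside, not part of the index: BUILD_LOCK_OPS() generates a whole family of lock primitives by pasting the op token (read, write, spin, ...) into the generated identifiers. A standalone toy version of the same token-pasting technique, with counters instead of locks and all names invented for the sketch, is:

    #include <stdio.h>

    /* One macro instantiation emits a family of functions whose names embed the
     * op token, mirroring how BUILD_LOCK_OPS stamps out __raw_<op>_lock,
     * __raw_<op>_lock_irq, __raw_<op>_lock_bh, and so on. */
    #define BUILD_COUNTER_OPS(op)                                   \
    static int op##_count;                                          \
    static void op##_hit(void)   { op##_count++; }                  \
    static int  op##_total(void) { return op##_count; }

    BUILD_COUNTER_OPS(read)     /* emits read_count, read_hit(), read_total() */
    BUILD_COUNTER_OPS(write)    /* emits write_count, write_hit(), write_total() */

    int main(void)
    {
        read_hit();
        write_hit();
        write_hit();
        printf("read=%d write=%d\n", read_total(), write_total());
        return 0;
    }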
|
/kernel/bpf/ |
D | bpf_iter.c |
    128  p = seq->op->start(seq, &seq->index);   in bpf_seq_read()
    133  seq->op->stop(seq, p);   in bpf_seq_read()
    138  err = seq->op->show(seq, p);   in bpf_seq_read()
    148  seq->op->stop(seq, p);   in bpf_seq_read()
    159  p = seq->op->next(seq, p, &seq->index);   in bpf_seq_read()
    163  seq->op->next);   in bpf_seq_read()
    179  seq->op->stop(seq, p);   in bpf_seq_read()
    185  err = seq->op->show(seq, p);   in bpf_seq_read()
    194  seq->op->stop(seq, p);   in bpf_seq_read()
    206  seq->op->stop(seq, NULL);   in bpf_seq_read()
    [all …]
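Illustrative aside, not part of the index: bpf_seq_read() drives a seq_file-style operations table through the usual start/show/next/stop cycle, which is what all the seq->op->… hits above are. A bare-bones table of that shape is sketched below; the demo_* callbacks and the fixed three-item sequence are invented for the example, only the seq_operations protocol itself is standard, and wiring it up (through seq_open() or, for BPF iterators, the bpf_iter registration machinery) is omitted.

    #include <linux/seq_file.h>

    static void *demo_start(struct seq_file *m, loff_t *pos)
    {
        return *pos < 3 ? pos : NULL;           /* NULL terminates iteration */
    }

    static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
    {
        ++*pos;
        return *pos < 3 ? pos : NULL;
    }

    static int demo_show(struct seq_file *m, void *v)
    {
        seq_printf(m, "item %lld\n", (long long)*(loff_t *)v);
        return 0;
    }

    static void demo_stop(struct seq_file *m, void *v)
    {
        /* release anything demo_start() pinned; nothing to do here */
    }

    static const struct seq_operations demo_seq_ops = {
        .start = demo_start,
        .next  = demo_next,
        .show  = demo_show,
        .stop  = demo_stop,
    };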
|
D | task_iter.c |
    457  enum bpf_task_vma_iter_find_op op;   in task_vma_seq_get_next() local
    514  op = task_vma_iter_find_vma;   in task_vma_seq_get_next()
    521  op = task_vma_iter_next_vma;   in task_vma_seq_get_next()
    533  op = task_vma_iter_first_vma;   in task_vma_seq_get_next()
    541  op = task_vma_iter_find_vma;   in task_vma_seq_get_next()
    554  switch (op) {   in task_vma_seq_get_next()
|
/kernel/printk/ |
D | index.c |
    151  static int pi_module_notify(struct notifier_block *nb, unsigned long op,   in pi_module_notify() argument
    156  switch (op) {   in pi_module_notify()
|
/kernel/kcsan/ |
D | core.c |
    1193  #define DEFINE_TSAN_ATOMIC_RMW(op, bits, suffix) \   argument
    1194  u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder); \
    1195  u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder) \
    1203  return __atomic_##op##suffix(ptr, v, memorder); \
    1205  EXPORT_SYMBOL(__tsan_atomic##bits##_##op)
|