/kernel/bpf/

D | disasm.c
    126   insn->code, insn->dst_reg,  in print_bpf_end_insn()
    127   BPF_SRC(insn->code) == BPF_TO_BE ? "be" : "le",  in print_bpf_end_insn()
    136   u8 class = BPF_CLASS(insn->code);  in print_bpf_insn()
    139   if (BPF_OP(insn->code) == BPF_END) {  in print_bpf_insn()
    141   verbose(cbs->private_data, "BUG_alu64_%02x\n", insn->code);  in print_bpf_insn()
    144   } else if (BPF_OP(insn->code) == BPF_NEG) {  in print_bpf_insn()
    146   insn->code, class == BPF_ALU ? 'w' : 'r',  in print_bpf_insn()
    149   } else if (BPF_SRC(insn->code) == BPF_X) {  in print_bpf_insn()
    151   insn->code, class == BPF_ALU ? 'w' : 'r',  in print_bpf_insn()
    153   bpf_alu_string[BPF_OP(insn->code) >> 4],  in print_bpf_insn()
    [all …]

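Every disasm.c hit above (and most of the verifier.c hits below) is picking apart the same 8-bit opcode byte of struct bpf_insn. For orientation, a minimal stand-alone C sketch of that field layout; the masks and values mirror include/uapi/linux/bpf_common.h, while the main() example itself is only an assumed illustration:

    #include <stdio.h>
    #include <stdint.h>

    /* Field masks for the 8-bit opcode, as in include/uapi/linux/bpf_common.h. */
    #define BPF_CLASS(code) ((code) & 0x07) /* instruction class, e.g. BPF_ALU */
    #define BPF_OP(code)    ((code) & 0xf0) /* operation, e.g. BPF_ADD, BPF_NEG */
    #define BPF_SRC(code)   ((code) & 0x08) /* operand source: BPF_K imm, BPF_X reg */

    #define BPF_ALU64 0x07 /* 64-bit ALU class */
    #define BPF_K     0x00 /* immediate operand */
    #define BPF_X     0x08 /* register operand */
    #define BPF_ADD   0x00

    int main(void)
    {
        uint8_t code = BPF_ALU64 | BPF_ADD | BPF_X; /* 64-bit add of two registers */

        printf("class=%#x op=%#x src=%s\n",
               BPF_CLASS(code), BPF_OP(code),
               BPF_SRC(code) == BPF_X ? "reg" : "imm");
        return 0;
    }

The bpf_alu_string[BPF_OP(insn->code) >> 4] hit at disasm.c line 153 leans on the same layout: the operation occupies the top nibble, so shifting right by four turns it into a mnemonic-table index.
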
D | verifier.c
    238   return insn->code == (BPF_JMP | BPF_CALL) &&  in bpf_pseudo_call()
    244   return insn->code == (BPF_JMP | BPF_CALL) &&  in bpf_pseudo_kfunc_call()
    540   return BPF_CLASS(insn->code) == BPF_STX &&  in is_cmpxchg_insn()
    541   BPF_MODE(insn->code) == BPF_ATOMIC &&  in is_cmpxchg_insn()
    2244  u8 code = insn[i].code;  in check_subprogs()  local
    2246  if (code == (BPF_JMP | BPF_CALL) &&  in check_subprogs()
    2250  if (BPF_CLASS(code) == BPF_LD &&  in check_subprogs()
    2251  (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND))  in check_subprogs()
    2253  if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)  in check_subprogs()
    2255  if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)  in check_subprogs()
    [all …]

D | core.c
    301   dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&  in bpf_prog_calc_tag()
    307   dst[i].code == 0 &&  in bpf_prog_calc_tag()
    391   u8 code;  in bpf_adj_branches()  local
    408   code = insn->code;  in bpf_adj_branches()
    409   if ((BPF_CLASS(code) != BPF_JMP &&  in bpf_adj_branches()
    410   BPF_CLASS(code) != BPF_JMP32) ||  in bpf_adj_branches()
    411   BPF_OP(code) == BPF_EXIT)  in bpf_adj_branches()
    414   if (BPF_OP(code) == BPF_CALL) {  in bpf_adj_branches()
    1251  (from->code == (BPF_ALU | BPF_MOV | BPF_K) ||  in bpf_jit_blind_insn()
    1252  from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {  in bpf_jit_blind_insn()
    [all …]

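The core.c hits at lines 1251-1252 come from constant blinding: bpf_jit_blind_insn() matches MOV-immediate instructions so user-supplied constants can be rewritten before JIT compilation. A toy sketch of the underlying idea, with the variable names and random source chosen for the example rather than taken from the kernel:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Toy illustration of constant blinding: instead of materializing a
     * user-supplied immediate directly, load imm ^ rnd and XOR with rnd
     * at run time. (imm ^ rnd) ^ rnd == imm, so behaviour is unchanged,
     * but the raw constant never appears in the emitted code. */
    int main(void)
    {
        uint32_t imm = 0xdeadbeef;          /* attacker-controlled constant */
        uint32_t rnd = (uint32_t)random();  /* per-instruction random key */

        uint32_t reg = imm ^ rnd;           /* what the JIT would emit as a load */
        reg ^= rnd;                         /* follow-up XOR restores the value */

        printf("restored %#x (matches: %d)\n", reg, reg == imm);
        return 0;
    }

In the kernel this rewrite is gated by the bpf_jit_harden setting; the snippet above only shows why the transformation is semantics-preserving.
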
D | syscall.c
    3837  u8 code;  in bpf_insn_prepare_dump()  local
    3846  code = insns[i].code;  in bpf_insn_prepare_dump()
    3848  if (code == (BPF_JMP | BPF_TAIL_CALL)) {  in bpf_insn_prepare_dump()
    3849  insns[i].code = BPF_JMP | BPF_CALL;  in bpf_insn_prepare_dump()
    3853  if (code == (BPF_JMP | BPF_CALL) ||  in bpf_insn_prepare_dump()
    3854  code == (BPF_JMP | BPF_CALL_ARGS)) {  in bpf_insn_prepare_dump()
    3855  if (code == (BPF_JMP | BPF_CALL_ARGS))  in bpf_insn_prepare_dump()
    3856  insns[i].code = BPF_JMP | BPF_CALL;  in bpf_insn_prepare_dump()
    3861  if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) {  in bpf_insn_prepare_dump()
    3862  insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM;  in bpf_insn_prepare_dump()
    [all …]

D | Kconfig
    47    allows the kernel to generate native code when a program is loaded

/kernel/trace/

D | trace_probe_tmpl.h
    7     fetch_store_raw(unsigned long val, struct fetch_insn *code, void *buf)  in fetch_store_raw()  argument
    9     switch (code->size) {  in fetch_store_raw()
    29    fetch_apply_bitfield(struct fetch_insn *code, void *buf)  in fetch_apply_bitfield()  argument
    31    switch (code->basesize) {  in fetch_apply_bitfield()
    33    *(u8 *)buf <<= code->lshift;  in fetch_apply_bitfield()
    34    *(u8 *)buf >>= code->rshift;  in fetch_apply_bitfield()
    37    *(u16 *)buf <<= code->lshift;  in fetch_apply_bitfield()
    38    *(u16 *)buf >>= code->rshift;  in fetch_apply_bitfield()
    41    *(u32 *)buf <<= code->lshift;  in fetch_apply_bitfield()
    42    *(u32 *)buf >>= code->rshift;  in fetch_apply_bitfield()
    [all …]

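The fetch_apply_bitfield() hits show the classic two-shift bitfield extraction: shift left to discard the bits above the field, then shift right to discard the bits below it and zero-extend. A worked user-space version (the function and parameter names here are mine):

    #include <stdint.h>
    #include <stdio.h>

    /* Extract the `width`-bit field starting at `bit_offset` (from the
     * LSB): the left shift drops the high bits, the logical right shift
     * drops the low bits and aligns the field at bit 0. */
    static uint32_t extract_bitfield32(uint32_t val,
                                       unsigned int bit_offset,
                                       unsigned int width)
    {
        unsigned int lshift = 32 - (bit_offset + width); /* kill bits above */
        unsigned int rshift = 32 - width;                /* kill bits below */

        return (val << lshift) >> rshift;
    }

    int main(void)
    {
        /* Field of width 4 starting at bit 8 of 0x00000f00 is 0xf. */
        printf("0x%x\n", extract_bitfield32(0x00000f00u, 8, 4));
        return 0;
    }

The kernel variant applies the same pair of shifts in place at each base size, selected by the switch on code->basesize visible in the hits.
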
D | trace_probe.c
    282   struct fetch_insn *code, unsigned int flags, int offs)  in parse_probe_vars()  argument
    289   if (code->data)  in parse_probe_vars()
    291   code->data = kstrdup(arg, GFP_KERNEL);  in parse_probe_vars()
    292   if (!code->data)  in parse_probe_vars()
    294   code->op = FETCH_OP_TP_ARG;  in parse_probe_vars()
    297   code->op = FETCH_OP_RETVAL;  in parse_probe_vars()
    304   code->op = FETCH_OP_STACKP;  in parse_probe_vars()
    314   code->op = FETCH_OP_STACK;  in parse_probe_vars()
    315   code->param = (unsigned int)param;  in parse_probe_vars()
    320   code->op = FETCH_OP_COMM;  in parse_probe_vars()
    [all …]

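parse_probe_vars() maps the documented $-variable names of the kprobe trace syntax ($retval, $stack, $stackN, $comm; see Documentation/trace/kprobetrace.rst) onto FETCH_OP_* opcodes. A reduced sketch, assuming the leading '$' has already been stripped by the caller and with error handling collapsed to a NOP op; the struct layout is simplified for the example:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    enum fetch_op { FETCH_OP_RETVAL, FETCH_OP_STACKP, FETCH_OP_STACK,
                    FETCH_OP_COMM, FETCH_OP_NOP };

    struct fetch_insn { enum fetch_op op; unsigned int param; };

    static void parse_var(const char *arg, struct fetch_insn *code)
    {
        code->op = FETCH_OP_NOP;
        code->param = 0;

        if (!strcmp(arg, "retval"))
            code->op = FETCH_OP_RETVAL;       /* $retval: return value */
        else if (!strcmp(arg, "stack"))
            code->op = FETCH_OP_STACKP;       /* $stack: stack pointer */
        else if (!strncmp(arg, "stack", 5)) { /* $stackN: N-th stack entry */
            code->op = FETCH_OP_STACK;
            code->param = (unsigned int)strtoul(arg + 5, NULL, 10);
        } else if (!strcmp(arg, "comm"))
            code->op = FETCH_OP_COMM;         /* $comm: current task name */
    }

    int main(void)
    {
        struct fetch_insn insn;

        parse_var("stack3", &insn);
        printf("op=%d param=%u\n", insn.op, insn.param); /* op=2 param=3 */
        return 0;
    }
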
D | trace_eprobe.c
    239   if (!strcmp(parg->code->data, field->name)) {  in trace_eprobe_tp_arg_update()
    240   kfree(parg->code->data);  in trace_eprobe_tp_arg_update()
    241   parg->code->data = field;  in trace_eprobe_tp_arg_update()
    250   if (strcmp(parg->code->data, "COMM") == 0 ||  in trace_eprobe_tp_arg_update()
    251   strcmp(parg->code->data, "comm") == 0) {  in trace_eprobe_tp_arg_update()
    252   parg->code->op = FETCH_OP_COMM;  in trace_eprobe_tp_arg_update()
    256   kfree(parg->code->data);  in trace_eprobe_tp_arg_update()
    257   parg->code->data = NULL;  in trace_eprobe_tp_arg_update()
    323   static unsigned long get_event_field(struct fetch_insn *code, void *rec)  in get_event_field()  argument
    325   struct ftrace_event_field *field = code->data;  in get_event_field()
    [all …]

D | trace_uprobe.c
    219   process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,  in process_fetch_insn()  argument
    226   switch (code->op) {  in process_fetch_insn()
    228   val = regs_get_register(regs, code->param);  in process_fetch_insn()
    231   val = get_user_stack_nth(regs, code->param);  in process_fetch_insn()
    240   val = code->immediate;  in process_fetch_insn()
    246   val = (unsigned long)code->data;  in process_fetch_insn()
    249   val = translate_user_vaddr(code->immediate);  in process_fetch_insn()
    254   code++;  in process_fetch_insn()
    256   return process_fetch_insn_bottom(code, val, dest, base);  in process_fetch_insn()

D | trace_kprobe.c
    1338  process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,  in process_fetch_insn()  argument
    1346  switch (code->op) {  in process_fetch_insn()
    1348  val = regs_get_register(regs, code->param);  in process_fetch_insn()
    1351  val = regs_get_kernel_stack_nth(regs, code->param);  in process_fetch_insn()
    1360  val = code->immediate;  in process_fetch_insn()
    1366  val = (unsigned long)code->data;  in process_fetch_insn()
    1370  val = regs_get_kernel_argument(regs, code->param);  in process_fetch_insn()
    1374  code++;  in process_fetch_insn()
    1379  code++;  in process_fetch_insn()
    1381  return process_fetch_insn_bottom(code, val, dest, base);  in process_fetch_insn()

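The two process_fetch_insn() variants above (uprobes and kprobes differ mainly in where the base value may come from) share one shape: the first fetch instruction produces a base value, then the cursor advances and the rest of the program goes to process_fetch_insn_bottom() for dereference and store stages. A toy model of that two-stage interpreter; the op names and types are invented for the sketch, the real ones live in kernel/trace/trace_probe.h:

    #include <stdio.h>

    enum fetch_op { OP_IMM, OP_ADD, OP_SHR, OP_END };

    struct fetch_insn {
        enum fetch_op op;
        unsigned long param;
    };

    static unsigned long run_fetch(const struct fetch_insn *code)
    {
        unsigned long val = 0;

        switch (code->op) {     /* stage 1: pick the base value */
        case OP_IMM:
            val = code->param;
            break;
        default:
            return 0;           /* unknown base op: bail out */
        }
        code++;                 /* stage 2+: post-processing chain */

        for (; code->op != OP_END; code++) {
            switch (code->op) {
            case OP_ADD: val += code->param; break;
            case OP_SHR: val >>= code->param; break;
            default: break;
            }
        }
        return val;
    }

    int main(void)
    {
        const struct fetch_insn prog[] = {
            { OP_IMM, 0x1230 }, { OP_ADD, 0xf }, { OP_SHR, 4 }, { OP_END, 0 },
        };

        printf("%#lx\n", run_fetch(prog)); /* (0x1230 + 0xf) >> 4 == 0x123 */
        return 0;
    }
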
D | trace_probe.h
    221   struct fetch_insn *code;  member

/kernel/

D | Kconfig.preempt
    38    "explicit preemption points" to the kernel code. These new
    57    all kernel code (that is not executing in a critical section)
    64    and a slight runtime overhead to kernel code.
    80    low level and critical code paths (entry code, scheduler, low

D | seccomp.c
    280   u16 code = ftest->code;  in seccomp_check_filter()  local
    283   switch (code) {  in seccomp_check_filter()
    285   ftest->code = BPF_LDX | BPF_W | BPF_ABS;  in seccomp_check_filter()
    291   ftest->code = BPF_LD | BPF_IMM;  in seccomp_check_filter()
    295   ftest->code = BPF_LDX | BPF_IMM;  in seccomp_check_filter()
    732   u16 code = insn->code;  in seccomp_is_const_allow()  local
    735   switch (code) {  in seccomp_is_const_allow()
    759   switch (BPF_OP(code)) {  in seccomp_is_const_allow()

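The seccomp.c hits at 280-295 are from seccomp_check_filter(), which walks a classic-BPF filter and accepts only opcodes on an explicit allow-list, rewriting some of them in place. A stand-alone sketch of that vetting pattern with a deliberately tiny allow-list; struct sock_filter and the opcode values match the UAPI headers, everything else is illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* Classic BPF instruction layout (include/uapi/linux/filter.h). */
    struct sock_filter {
        uint16_t code;
        uint8_t  jt, jf;
        uint32_t k;
    };

    #define BPF_LD  0x00
    #define BPF_RET 0x06
    #define BPF_W   0x00
    #define BPF_ABS 0x20
    #define BPF_K   0x00

    /* Reject any opcode that is not explicitly allowed. */
    static int vet_filter(const struct sock_filter *f, unsigned int len)
    {
        for (unsigned int i = 0; i < len; i++) {
            switch (f[i].code) {
            case BPF_LD | BPF_W | BPF_ABS: /* load a word of input data */
            case BPF_RET | BPF_K:          /* return a constant action */
                continue;
            default:
                return -1;                 /* unknown opcode: refuse to load */
            }
        }
        return 0;
    }

    int main(void)
    {
        struct sock_filter prog[] = {
            { BPF_LD | BPF_W | BPF_ABS, 0, 0, 0 },  /* A = data[0] */
            { BPF_RET | BPF_K, 0, 0, 0x7fff0000 },  /* SECCOMP_RET_ALLOW */
        };

        printf("filter %s\n", vet_filter(prog, 2) ? "rejected" : "accepted");
        return 0;
    }

The real function additionally rewrites accepted load opcodes (the ftest->code = ... assignments in the hits) so later stages fetch from struct seccomp_data rather than a socket buffer.
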
D | exit.c
    792   static void synchronize_group_exit(struct task_struct *tsk, long code)  in synchronize_group_exit()  argument
    802   signal->group_exit_code = code;  in synchronize_group_exit()
    808   void __noreturn do_exit(long code)  in do_exit()  argument
    815   synchronize_group_exit(tsk, code);  in do_exit()
    824   ptrace_event(PTRACE_EVENT_EXIT, code);  in do_exit()
    845   tsk->signal->group_exit_code ?: (int)code);  in do_exit()
    854   acct_collect(code, group_dead);  in do_exit()
    859   tsk->exit_code = code;  in do_exit()

D | jump_label.c
    70    jea->code = jeb->code - delta;  in jump_label_swap()
    74    jeb->code = tmp.code + delta;  in jump_label_swap()

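The two jump_label.c hits are the interesting lines of jump_label_swap(), the swap callback used while sorting the jump-entry table on architectures that store entries as self-relative offsets (absolute address == address of the field + stored offset). Moving an entry by delta bytes therefore requires re-biasing its offsets by the same delta. A stand-alone demonstration with a one-field entry; the real struct jump_entry also carries target and key fields that get the same treatment:

    #include <stdio.h>

    struct jump_entry { long code; }; /* self-relative: addr == &entry + code */

    static void jump_label_swap(struct jump_entry *a, struct jump_entry *b)
    {
        long delta = (char *)a - (char *)b;
        struct jump_entry tmp = *a;

        a->code = b->code - delta;  /* b's payload now lives at a */
        b->code = tmp.code + delta; /* a's payload now lives at b */
    }

    int main(void)
    {
        struct jump_entry tab[2];
        char *t1 = (char *)tab + 0x100; /* two distinct absolute targets */
        char *t2 = (char *)tab + 0x200;

        tab[0].code = t1 - (char *)&tab[0];
        tab[1].code = t2 - (char *)&tab[1];

        jump_label_swap(&tab[0], &tab[1]);

        /* The payloads swapped, and each offset still resolves correctly:
         * tab[0] now points at t2 and tab[1] at t1. Prints "1 1". */
        printf("%d %d\n",
               (char *)&tab[0] + tab[0].code == t2,
               (char *)&tab[1] + tab[1].code == t1);
        return 0;
    }
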
D | signal.c
    1726  int force_sig_fault_to_task(int sig, int code, void __user *addr  in force_sig_fault_to_task()  argument
    1735  info.si_code = code;  in force_sig_fault_to_task()
    1745  int force_sig_fault(int sig, int code, void __user *addr  in force_sig_fault()  argument
    1748  return force_sig_fault_to_task(sig, code, addr  in force_sig_fault()
    1752  int send_sig_fault(int sig, int code, void __user *addr  in send_sig_fault()  argument
    1761  info.si_code = code;  in send_sig_fault()
    1771  int force_sig_mceerr(int code, void __user *addr, short lsb)  in force_sig_mceerr()  argument
    1775  WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));  in force_sig_mceerr()
    1779  info.si_code = code;  in force_sig_mceerr()
    1785  int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)  in send_sig_mceerr()  argument
    [all …]

D | auditsc.c
    1870  int success, long code)  in audit_return_fixup()  argument
    1879  if (unlikely(code <= -ERESTARTSYS) &&  in audit_return_fixup()
    1880  (code >= -ERESTART_RESTARTBLOCK) &&  in audit_return_fixup()
    1881  (code != -ENOIOCTLCMD))  in audit_return_fixup()
    1884  ctx->return_code = code;  in audit_return_fixup()
    1933  void __audit_uring_exit(int success, long code)  in __audit_uring_exit()  argument
    1943  audit_return_fixup(ctx, success, code);  in __audit_uring_exit()
    2988  void audit_seccomp(unsigned long syscall, long signr, int code)  in audit_seccomp()  argument
    2998  in_compat_syscall(), KSTK_EIP(current), code);  in audit_seccomp()

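The range test in audit_return_fixup() (hits 1879-1881) filters out the kernel-internal restart codes, which occupy 512-516 in the internal errno space and must not be reported as if they were real syscall return values; ENOIOCTLCMD sits inside that numeric range but is not a restart code, hence the extra exclusion. A small sketch with the constants from include/linux/errno.h:

    #include <stdio.h>

    /* Kernel-internal error numbers; these never reach user space. */
    #define ERESTARTSYS           512
    #define ERESTARTNOINTR        513
    #define ERESTARTNOHAND        514
    #define ENOIOCTLCMD           515
    #define ERESTART_RESTARTBLOCK 516

    /* One range check plus one exclusion covers the four restart codes,
     * which is cheaper than four equality comparisons. */
    static int is_restart_code(long code)
    {
        return code <= -ERESTARTSYS &&
               code >= -ERESTART_RESTARTBLOCK &&
               code != -ENOIOCTLCMD;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               is_restart_code(-ERESTARTSYS), /* 1: restart, filtered   */
               is_restart_code(-ENOIOCTLCMD), /* 0: excluded explicitly */
               is_restart_code(-1));          /* 0: ordinary -EPERM     */
        return 0;
    }
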
D | kthread.c
    331   void __noreturn kthread_complete_and_exit(struct completion *comp, long code)  in kthread_complete_and_exit()  argument
    336   kthread_exit(code);  in kthread_complete_and_exit()

/kernel/gcov/

D | Kconfig
    11    This option enables gcov-based code profiling (e.g. for code coverage

/kernel/power/

D | Kconfig
    152   This option changes the behavior of various sleep-sensitive code to deal
    155   Saying Y here, disables code paths that most users really should keep
    202   code. This is helpful when debugging and reporting PM bugs, like
    247   This enables code to save the last PM event point across
    251   The architecture specific code must provide the extern
    265   This enables some cheesy code to save the last PM event point in the

/kernel/livepatch/

D | Kconfig
    20    to new function code contained in the patch module.

/kernel/rcu/

D | Kconfig
    79    This option enables generic infrastructure code supporting
    124   CPU hotplug code paths. It can force IPIs on online CPUs,
    136   This option enables RCU CPU stall code that is common between
    159   code paths on small(er) systems.

/kernel/module/

D | Kconfig
    6     Kernel modules are small pieces of compiled code which can
    279   When kernel code requests a module, it does so by calling
    298   (especially when using LTO) for optimizing the code and reducing

/kernel/irq/

D | Kconfig
    3     # Options selectable by the architecture code

/kernel/debug/

D | debug_core.c
    1042  dbg_notify_reboot(struct notifier_block *this, unsigned long code, void *x)  in dbg_notify_reboot()  argument
    1058  gdbstub_exit(code);  in dbg_notify_reboot()
