Searched refs:code (Results 1 – 25 of 25) sorted by relevance

/kernel/trace/
trace_probe_tmpl.h:7 fetch_store_raw(unsigned long val, struct fetch_insn *code, void *buf) in fetch_store_raw() argument
9 switch (code->size) { in fetch_store_raw()
29 fetch_apply_bitfield(struct fetch_insn *code, void *buf) in fetch_apply_bitfield() argument
31 switch (code->basesize) { in fetch_apply_bitfield()
33 *(u8 *)buf <<= code->lshift; in fetch_apply_bitfield()
34 *(u8 *)buf >>= code->rshift; in fetch_apply_bitfield()
37 *(u16 *)buf <<= code->lshift; in fetch_apply_bitfield()
38 *(u16 *)buf >>= code->rshift; in fetch_apply_bitfield()
41 *(u32 *)buf <<= code->lshift; in fetch_apply_bitfield()
42 *(u32 *)buf >>= code->rshift; in fetch_apply_bitfield()
[all …]
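
Note: the lshift/rshift pair above is the standard trick for extracting a bitfield: shift left so the field's top bit becomes the word's top bit, then shift right to bring the field down to bit 0, zero-extending it. A minimal user-space sketch of the same arithmetic (the helper name and field layout are invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    /* Extract 'width' bits starting at bit 'offset' of a 32-bit word, the
     * way fetch_apply_bitfield() does for basesize 32: shift the field up
     * to the top of the word, then back down to bit 0. */
    static uint32_t extract_bitfield32(uint32_t val, unsigned int offset,
                                       unsigned int width)
    {
            unsigned int lshift = 32 - (offset + width);
            unsigned int rshift = 32 - width;

            val <<= lshift;         /* drop the bits above the field */
            val >>= rshift;         /* drop the bits below it, zero-extend */
            return val;
    }

    int main(void)
    {
            /* bits 4..9 of 0x3f0 are 0b111111 == 63 */
            printf("%u\n", extract_bitfield32(0x3f0, 4, 6));
            return 0;
    }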
trace_probe.c:273 struct fetch_insn *code, unsigned int flags, int offs) in parse_probe_vars() argument
281 code->op = FETCH_OP_RETVAL; in parse_probe_vars()
288 code->op = FETCH_OP_STACKP; in parse_probe_vars()
298 code->op = FETCH_OP_STACK; in parse_probe_vars()
299 code->param = (unsigned int)param; in parse_probe_vars()
304 code->op = FETCH_OP_COMM; in parse_probe_vars()
316 code->op = FETCH_OP_ARG; in parse_probe_vars()
317 code->param = (unsigned int)param - 1; in parse_probe_vars()
358 struct fetch_insn *code = *pcode; in parse_probe_arg() local
367 ret = parse_probe_vars(arg + 1, type, code, flags, offs); in parse_probe_arg()
[all …]
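
parse_probe_vars() is the front end that maps the $-prefixed variables of a probe definition ($retval, $stack, $stackN, $comm, $argN) onto the FETCH_OP_* opcodes visible above. A simplified user-space model of that mapping (the opcode names come from the snippet; the struct layout and parsing details are guesses for illustration):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Mirrors the opcodes visible in the snippet; values are arbitrary here. */
    enum fetch_op {
            FETCH_OP_RETVAL,        /* $retval */
            FETCH_OP_STACKP,        /* $stack  */
            FETCH_OP_STACK,         /* $stackN */
            FETCH_OP_COMM,          /* $comm   */
            FETCH_OP_ARG,           /* $argN   */
            FETCH_OP_ERROR,
    };

    struct fetch_insn {
            enum fetch_op op;
            unsigned int param;
    };

    /* Hypothetical cut-down parse_probe_vars(); arg points past the '$'. */
    static void parse_var(const char *arg, struct fetch_insn *code)
    {
            if (!strcmp(arg, "retval")) {
                    code->op = FETCH_OP_RETVAL;
            } else if (!strncmp(arg, "stack", 5)) {
                    if (arg[5] == '\0') {
                            code->op = FETCH_OP_STACKP;     /* stack pointer */
                    } else {
                            code->op = FETCH_OP_STACK;      /* N-th stack entry */
                            code->param = (unsigned int)strtoul(arg + 5, NULL, 10);
                    }
            } else if (!strcmp(arg, "comm")) {
                    code->op = FETCH_OP_COMM;
            } else if (!strncmp(arg, "arg", 3) && arg[3]) {
                    code->op = FETCH_OP_ARG;                /* $argN is 1-based */
                    code->param = (unsigned int)strtoul(arg + 3, NULL, 10) - 1;
            } else {
                    code->op = FETCH_OP_ERROR;
            }
    }

    int main(void)
    {
            struct fetch_insn insn = { 0 };

            parse_var("arg2", &insn);
            printf("op=%d param=%u\n", insn.op, insn.param); /* FETCH_OP_ARG, 1 */
            return 0;
    }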
trace_kprobe.c:1126 process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest, in process_fetch_insn() argument
1133 switch (code->op) { in process_fetch_insn()
1135 val = regs_get_register(regs, code->param); in process_fetch_insn()
1138 val = regs_get_kernel_stack_nth(regs, code->param); in process_fetch_insn()
1147 val = code->immediate; in process_fetch_insn()
1153 val = (unsigned long)code->data; in process_fetch_insn()
1157 val = regs_get_kernel_argument(regs, code->param); in process_fetch_insn()
1161 code++; in process_fetch_insn()
1166 code++; in process_fetch_insn()
1168 return process_fetch_insn_bottom(code, val, dest, base); in process_fetch_insn()
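
process_fetch_insn() is stage one of a two-stage interpreter: a single "producer" opcode yields the initial unsigned long (register, stack slot, immediate, ...), and process_fetch_insn_bottom() then runs the remaining instructions (dereferences, bitfields, stores) against it; the uprobe variant below has the same shape. A toy model of that control flow, with invented opcodes:

    #include <stdio.h>

    enum op { OP_IMM, OP_ADD, OP_SHL, OP_END };

    struct insn {
            enum op op;
            unsigned long operand;
    };

    /* Second stage: transform the value until OP_END (stands in for
     * process_fetch_insn_bottom(), which applies derefs and stores). */
    static unsigned long run_bottom(const struct insn *code, unsigned long val)
    {
            for (; code->op != OP_END; code++) {
                    switch (code->op) {
                    case OP_ADD: val += code->operand; break;
                    case OP_SHL: val <<= code->operand; break;
                    default: break;
                    }
            }
            return val;
    }

    /* First stage: one "producer" instruction yields the initial value,
     * then the rest of the program post-processes it. */
    static unsigned long run(const struct insn *code)
    {
            unsigned long val;

            switch (code->op) {
            case OP_IMM: val = code->operand; break;
            default: return 0;
            }
            code++;
            return run_bottom(code, val);
    }

    int main(void)
    {
            const struct insn prog[] = {
                    { OP_IMM, 5 }, { OP_SHL, 1 }, { OP_ADD, 2 }, { OP_END, 0 },
            };
            printf("%lu\n", run(prog));     /* (5 << 1) + 2 = 12 */
            return 0;
    }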
trace_uprobe.c:226 process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest, in process_fetch_insn() argument
232 switch (code->op) { in process_fetch_insn()
234 val = regs_get_register(regs, code->param); in process_fetch_insn()
237 val = get_user_stack_nth(regs, code->param); in process_fetch_insn()
246 val = code->immediate; in process_fetch_insn()
252 val = (unsigned long)code->data; in process_fetch_insn()
255 val = translate_user_vaddr(code->immediate); in process_fetch_insn()
260 code++; in process_fetch_insn()
262 return process_fetch_insn_bottom(code, val, dest, base); in process_fetch_insn()
trace_probe.h:216 struct fetch_insn *code; member
Kconfig:606 It basically just enables each event and runs some code that
/kernel/bpf/
disasm.c:112 insn->code, insn->dst_reg, in print_bpf_end_insn()
113 BPF_SRC(insn->code) == BPF_TO_BE ? "be" : "le", in print_bpf_end_insn()
122 u8 class = BPF_CLASS(insn->code); in print_bpf_insn()
125 if (BPF_OP(insn->code) == BPF_END) { in print_bpf_insn()
127 verbose(cbs->private_data, "BUG_alu64_%02x\n", insn->code); in print_bpf_insn()
130 } else if (BPF_OP(insn->code) == BPF_NEG) { in print_bpf_insn()
132 insn->code, class == BPF_ALU ? 'w' : 'r', in print_bpf_insn()
135 } else if (BPF_SRC(insn->code) == BPF_X) { in print_bpf_insn()
137 insn->code, class == BPF_ALU ? 'w' : 'r', in print_bpf_insn()
139 bpf_alu_string[BPF_OP(insn->code) >> 4], in print_bpf_insn()
[all …]
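
Every BPF_CLASS()/BPF_OP()/BPF_SRC() call above decodes fields packed into the single insn->code byte: bits 0-2 hold the instruction class, bit 3 selects register vs. immediate source, and bits 4-7 hold the operation. A small decode demo; the masks and constants mirror the kernel's UAPI headers (bpf_common.h and bpf.h):

    #include <stdint.h>
    #include <stdio.h>

    /* Field masks, mirroring include/uapi/linux/bpf_common.h / bpf.h. */
    #define BPF_CLASS(code) ((code) & 0x07)
    #define BPF_SRC(code)   ((code) & 0x08)
    #define BPF_OP(code)    ((code) & 0xf0)

    #define BPF_ALU64       0x07    /* 64-bit ALU class */
    #define BPF_X           0x08    /* source is a register */
    #define BPF_ADD         0x00

    int main(void)
    {
            uint8_t code = BPF_ALU64 | BPF_ADD | BPF_X;     /* 0x0f: dst += src */

            printf("class=%#x op=%#x src=%s\n",
                   BPF_CLASS(code), BPF_OP(code),
                   BPF_SRC(code) == BPF_X ? "reg" : "imm");
            return 0;
    }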
verifier.c:1147 if (insn[i].code != (BPF_JMP | BPF_CALL)) in check_subprogs()
1173 u8 code = insn[i].code; in check_subprogs() local
1175 if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32) in check_subprogs()
1177 if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL) in check_subprogs()
1190 if (code != (BPF_JMP | BPF_EXIT) && in check_subprogs()
1191 code != (BPF_JMP | BPF_JA)) { in check_subprogs()
1262 u8 code, class, op; in is_reg64() local
1264 code = insn->code; in is_reg64()
1265 class = BPF_CLASS(code); in is_reg64()
1266 op = BPF_OP(code); in is_reg64()
[all …]
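
check_subprogs() enforces, among other things, that a subprogram's last instruction is BPF_EXIT or an unconditional BPF_JA, so control can never fall off the end of a subprogram. A stripped-down user-space version of just that final-instruction check (the BPF_* constants are the real UAPI values; the surrounding code is invented):

    #include <stdint.h>
    #include <stdio.h>

    #define BPF_JMP         0x05
    #define BPF_JA          0x00
    #define BPF_EXIT        0x90

    /* The last insn of a subprogram must be EXIT or an unconditional JA,
     * mirroring the check_subprogs() rule in the verifier excerpt. */
    static int subprog_end_ok(const uint8_t *codes, int len)
    {
            uint8_t last = codes[len - 1];

            return last == (BPF_JMP | BPF_EXIT) || last == (BPF_JMP | BPF_JA);
    }

    int main(void)
    {
            uint8_t prog[] = { 0x07 /* alu64 */, BPF_JMP | BPF_EXIT };

            printf("%s\n", subprog_end_ok(prog, 2) ? "ok" : "bad subprog end");
            return 0;
    }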
core.c:290 dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) && in bpf_prog_calc_tag()
296 dst[i].code == 0 && in bpf_prog_calc_tag()
380 u8 code; in bpf_adj_branches() local
390 code = insn->code; in bpf_adj_branches()
391 if ((BPF_CLASS(code) != BPF_JMP && in bpf_adj_branches()
392 BPF_CLASS(code) != BPF_JMP32) || in bpf_adj_branches()
393 BPF_OP(code) == BPF_EXIT) in bpf_adj_branches()
396 if (BPF_OP(code) == BPF_CALL) { in bpf_adj_branches()
933 (from->code == (BPF_ALU | BPF_MOV | BPF_K) || in bpf_jit_blind_insn()
934 from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) { in bpf_jit_blind_insn()
[all …]
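
bpf_jit_blind_insn() implements constant blinding: an attacker-supplied immediate is never emitted verbatim into JITed memory. A MOV reg, imm becomes a three-instruction sequence through the auxiliary register BPF_REG_AX: load imm ^ rnd, XOR with rnd, move to the destination. A user-space sketch of just the arithmetic identity it relies on:

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    int main(void)
    {
            unsigned int imm = 0xdeadbeef;  /* attacker-chosen constant */
            unsigned int rnd;

            srand((unsigned int)time(NULL));
            rnd = (unsigned int)rand();     /* fresh blinding value per insn */

            /* Conceptual rewrite of "mov reg, imm":
             *   mov  AX, imm ^ rnd      <- the emitted constant is randomized
             *   xor  AX, rnd            <- recovers imm at run time
             *   mov  reg, AX
             */
            unsigned int emitted = imm ^ rnd;
            unsigned int recovered = emitted ^ rnd;

            printf("emitted=%#x recovered=%#x %s\n", emitted, recovered,
                   recovered == imm ? "(matches)" : "(mismatch)");
            return 0;
    }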
syscall.c:2262 if (insns[i].code == (BPF_JMP | BPF_TAIL_CALL)) { in bpf_insn_prepare_dump()
2263 insns[i].code = BPF_JMP | BPF_CALL; in bpf_insn_prepare_dump()
2267 if (insns[i].code == (BPF_JMP | BPF_CALL) || in bpf_insn_prepare_dump()
2268 insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) { in bpf_insn_prepare_dump()
2269 if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) in bpf_insn_prepare_dump()
2270 insns[i].code = BPF_JMP | BPF_CALL; in bpf_insn_prepare_dump()
2276 if (insns[i].code != (BPF_LD | BPF_IMM | BPF_DW)) in bpf_insn_prepare_dump()
cgroup.c:1349 BPF_SIZE(si->code), si->dst_reg, si->src_reg, in sysctl_convert_ctx_access()
1387 read_size = bpf_size_to_bytes(BPF_SIZE(si->code)); in sysctl_convert_ctx_access()
1389 BPF_SIZE(si->code), si->dst_reg, si->dst_reg, in sysctl_convert_ctx_access()
/kernel/
Kconfig.preempt:25 "explicit preemption points" to the kernel code. These new
45 all kernel code (that is not executing in a critical section)
52 and a slight runtime overhead to kernel code.
68 low level and critical code paths (entry code, scheduler, low
exit.c:711 void __noreturn do_exit(long code) in do_exit() argument
735 ptrace_event(PTRACE_EVENT_EXIT, code); in do_exit()
771 tsk->signal->group_exit_code ?: (int)code); in do_exit()
780 acct_collect(code, group_dead); in do_exit()
785 tsk->exit_code = code; in do_exit()
857 void complete_and_exit(struct completion *comp, long code) in complete_and_exit() argument
862 do_exit(code); in complete_and_exit()
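
complete_and_exit() exists for kernel threads whose code lives in a module: it signals a completion and terminates the calling thread in one non-returning call, so the thread is never executing module text after the completion fires and the module can be unloaded safely. A hedged module sketch of the traditional pattern (all names invented; error handling elided):

    #include <linux/completion.h>
    #include <linux/delay.h>
    #include <linux/kernel.h>
    #include <linux/kthread.h>
    #include <linux/module.h>

    static DECLARE_COMPLETION(worker_done);
    static bool stop_requested;

    static int worker_fn(void *data)
    {
            while (!READ_ONCE(stop_requested))
                    msleep(100);
            /* Signal the unloader and exit atomically; never returns. */
            complete_and_exit(&worker_done, 0);
    }

    static int __init worker_init(void)
    {
            kthread_run(worker_fn, NULL, "demo-worker");
            return 0;
    }

    static void __exit worker_exit(void)
    {
            WRITE_ONCE(stop_requested, true);
            wait_for_completion(&worker_done);      /* now safe to unload */
    }

    module_init(worker_init);
    module_exit(worker_exit);
    MODULE_LICENSE("GPL");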
jump_label.c:70 jea->code = jeb->code - delta; in jump_label_swap()
74 jeb->code = tmp.code + delta; in jump_label_swap()
seccomp.c:179 u16 code = ftest->code; in seccomp_check_filter() local
182 switch (code) { in seccomp_check_filter()
184 ftest->code = BPF_LDX | BPF_W | BPF_ABS; in seccomp_check_filter()
190 ftest->code = BPF_LD | BPF_IMM; in seccomp_check_filter()
194 ftest->code = BPF_LDX | BPF_IMM; in seccomp_check_filter()
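
seccomp_check_filter() validates classic-BPF programs submitted from user space via seccomp(2)/prctl(2), rewriting their loads from packet offsets to struct seccomp_data offsets. For context, this is the kind of filter it checks: a minimal user-space program that allows everything except one syscall. A sketch only; a robust filter would also verify seccomp_data.arch, elided here:

    #include <errno.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <linux/filter.h>
    #include <linux/seccomp.h>
    #include <sys/prctl.h>
    #include <sys/syscall.h>
    #include <sys/utsname.h>
    #include <unistd.h>

    int main(void)
    {
            struct sock_filter filter[] = {
                    /* A = seccomp_data.nr (the current syscall number) */
                    BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
                             offsetof(struct seccomp_data, nr)),
                    /* if (A == __NR_uname) fail it with EPERM, else allow */
                    BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_uname, 0, 1),
                    BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | EPERM),
                    BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
            };
            struct sock_fprog prog = {
                    .len = sizeof(filter) / sizeof(filter[0]),
                    .filter = filter,
            };
            struct utsname u;

            /* Required to install a filter without CAP_SYS_ADMIN. */
            prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
            if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog))
                    perror("prctl(SECCOMP)");

            if (uname(&u) < 0)
                    perror("uname");        /* expected: EPERM from the filter */
            return 0;
    }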
signal.c:1651 int force_sig_fault_to_task(int sig, int code, void __user *addr in force_sig_fault_to_task() argument
1661 info.si_code = code; in force_sig_fault_to_task()
1674 int force_sig_fault(int sig, int code, void __user *addr in force_sig_fault() argument
1678 return force_sig_fault_to_task(sig, code, addr in force_sig_fault()
1683 int send_sig_fault(int sig, int code, void __user *addr in send_sig_fault() argument
1693 info.si_code = code; in send_sig_fault()
1706 int force_sig_mceerr(int code, void __user *addr, short lsb) in force_sig_mceerr() argument
1710 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR)); in force_sig_mceerr()
1714 info.si_code = code; in force_sig_mceerr()
1720 int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t) in send_sig_mceerr() argument
[all …]
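
These helpers are what architecture fault handlers call to deliver a synchronous signal with si_code and the faulting address filled in. A hedged sketch of a typical call site (the surrounding handler is hypothetical; on most architectures the optional trap-number arguments compile away, leaving the three-argument form):

    #include <linux/sched/signal.h>

    /* Hypothetical arch page-fault path: deliver SIGSEGV with SEGV_MAPERR
     * in si_code and the faulting address in si_addr. */
    static void demo_deliver_segv(unsigned long address)
    {
            force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)address);
    }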
auditsc.c:2607 void audit_seccomp(unsigned long syscall, long signr, int code) in audit_seccomp() argument
2617 in_compat_syscall(), KSTK_EIP(current), code); in audit_seccomp()
module.c:335 void __noreturn __module_put_and_exit(struct module *mod, long code) in __module_put_and_exit() argument
338 do_exit(code); in __module_put_and_exit()
/kernel/gcov/
Kconfig:10 This option enables gcov-based code profiling (e.g. for code coverage
/kernel/irq/
Kconfig:3 # Options selectable by the architecture code
30 # and should not be used in new code. Use irq domains instead.
/kernel/livepatch/
Kconfig:20 to new function code contained in the patch module.
/kernel/power/
Kconfig:172 code. This is helpful when debugging and reporting PM bugs, like
217 This enables code to save the last PM event point across
221 The architecture specific code must provide the extern
235 This enables some cheesy code to save the last PM event point in the
/kernel/rcu/
Kconfig:83 This option enables RCU CPU stall code that is common between
106 code paths on small(er) systems.
Kconfig.debug:89 bugs in arch-specific NO_HZ code.
/kernel/debug/
debug_core.c:928 dbg_notify_reboot(struct notifier_block *this, unsigned long code, void *x) in dbg_notify_reboot() argument
943 gdbstub_exit(code); in dbg_notify_reboot()
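
dbg_notify_reboot() is a reboot-notifier callback: the kgdb core registers it so that, as the machine goes down, gdbstub_exit() can tell any attached debugger, with code carrying the SYS_RESTART/SYS_HALT/SYS_POWER_OFF event. The general pattern as a hedged module sketch (names invented):

    #include <linux/module.h>
    #include <linux/notifier.h>
    #include <linux/printk.h>
    #include <linux/reboot.h>

    /* Hypothetical callback on the reboot notifier chain, analogous to
     * dbg_notify_reboot(); 'code' is SYS_RESTART, SYS_HALT or SYS_POWER_OFF. */
    static int demo_notify_reboot(struct notifier_block *this,
                                  unsigned long code, void *x)
    {
            pr_info("reboot event %lu\n", code);
            return NOTIFY_DONE;
    }

    static struct notifier_block demo_reboot_nb = {
            .notifier_call = demo_notify_reboot,
            /* kgdb registers its notifier with a raised .priority so it
             * runs before most others */
    };

    static int __init demo_init(void)
    {
            return register_reboot_notifier(&demo_reboot_nb);
    }

    static void __exit demo_exit(void)
    {
            unregister_reboot_notifier(&demo_reboot_nb);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");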