
Searched refs:code (Results 1 – 13 of 13) sorted by relevance

/kernel/bpf/
verifier.c
285 u8 class = BPF_CLASS(insn->code); in print_bpf_insn()
288 if (BPF_SRC(insn->code) == BPF_X) in print_bpf_insn()
290 insn->code, class == BPF_ALU ? "(u32) " : "", in print_bpf_insn()
292 bpf_alu_string[BPF_OP(insn->code) >> 4], in print_bpf_insn()
297 insn->code, class == BPF_ALU ? "(u32) " : "", in print_bpf_insn()
299 bpf_alu_string[BPF_OP(insn->code) >> 4], in print_bpf_insn()
303 if (BPF_MODE(insn->code) == BPF_MEM) in print_bpf_insn()
305 insn->code, in print_bpf_insn()
306 bpf_ldst_string[BPF_SIZE(insn->code) >> 3], in print_bpf_insn()
309 else if (BPF_MODE(insn->code) == BPF_XADD) in print_bpf_insn()
[all …]
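
All of the verifier.c hits above pull fields out of the single opcode byte insn->code. As a minimal, self-contained sketch (userspace C, with the well-known uapi bit masks written out by hand rather than pulled from <linux/bpf_common.h>), the BPF_CLASS()/BPF_OP()/BPF_SRC()/BPF_SIZE()/BPF_MODE() style of decoding looks like this:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative masks mirroring the uapi BPF_CLASS()/BPF_SIZE()/BPF_MODE()/
     * BPF_OP()/BPF_SRC() helpers used by print_bpf_insn(); this is a sketch,
     * not kernel code. */
    #define CLASS(code) ((code) & 0x07) /* LD/LDX/ST/STX/ALU/JMP/ALU64      */
    #define SIZE(code)  ((code) & 0x18) /* W/H/B/DW for loads and stores    */
    #define MODE(code)  ((code) & 0xe0) /* IMM/ABS/IND/MEM/XADD ...         */
    #define OP(code)    ((code) & 0xf0) /* ADD/SUB/.../MOV for ALU and JMP  */
    #define SRC(code)   ((code) & 0x08) /* K (immediate) vs X (register)    */

    int main(void)
    {
        uint8_t code = 0xb7; /* BPF_ALU64 | BPF_MOV | BPF_K: mov dst, imm */

        printf("class=0x%02x op=0x%02x src=0x%02x\n",
               CLASS(code), OP(code), SRC(code));
        return 0;
    }

With the fields split out like that, the printer only needs OP(code) >> 4 to index bpf_alu_string[] and SIZE(code) >> 3 to index bpf_ldst_string[], which is what lines 292 and 306 above do.
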
core.c
141 return BPF_CLASS(insn->code) == BPF_JMP && in bpf_is_jmp_and_has_target()
145 BPF_OP(insn->code) != BPF_CALL && in bpf_is_jmp_and_has_target()
146 BPF_OP(insn->code) != BPF_EXIT; in bpf_is_jmp_and_has_target()
261 (from->code == (BPF_ALU | BPF_MOV | BPF_K) || in bpf_jit_blind_insn()
262 from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) { in bpf_jit_blind_insn()
267 switch (from->code) { in bpf_jit_blind_insn()
279 *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX); in bpf_jit_blind_insn()
293 *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX); in bpf_jit_blind_insn()
309 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off); in bpf_jit_blind_insn()
317 *to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0); in bpf_jit_blind_insn()
[all …]
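
The bpf_jit_blind_insn() hits replace an instruction that carries a user-controlled immediate with a short sequence: load imm ^ rnd into the auxiliary register, XOR rnd back in, then reuse the original opcode in its register form against BPF_REG_AX (the BPF_ALU32_REG/BPF_ALU64_REG/BPF_JMP_REG lines above). A small sketch of the identity that keeps that rewrite semantics-preserving; it is not the kernel's code:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    int main(void)
    {
        srand((unsigned)time(NULL));

        uint32_t imm = 0xdeadbeef;       /* immediate chosen by the program author */
        uint32_t rnd = (uint32_t)rand(); /* fresh blinding value per rewrite       */

        uint32_t ax = imm ^ rnd;         /* step 1: MOV AX, imm ^ rnd              */
        ax ^= rnd;                       /* step 2: XOR AX, rnd                    */

        /* AX now equals imm, but the literal 0xdeadbeef never had to be
         * emitted as an immediate in the rewritten instruction stream.   */
        printf("recovered 0x%08x, matches original: %d\n", ax, ax == imm);
        return 0;
    }
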
/kernel/
jump_label.c
203 if (entry->code <= (unsigned long)end && in addr_conflict()
204 entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start) in addr_conflict()
277 if (entry->code && kernel_text_address(entry->code)) in __jump_label_update()
486 if (within_module_init(iter->code, mod)) in jump_label_invalidate_module_init()
487 iter->code = 0; in jump_label_invalidate_module_init()
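
The addr_conflict() hit is a plain interval-overlap test: a jump entry's patch site occupies JUMP_LABEL_NOP_SIZE bytes starting at entry->code, and it conflicts with a text range whenever the two regions intersect. A standalone sketch of that check; the NOP size below is an assumption for illustration (the real value is per-architecture):

    #include <stdbool.h>
    #include <stdio.h>

    #define JUMP_LABEL_NOP_SIZE 5 /* assumed here; e.g. a 5-byte jmp/nop on x86 */

    /* Does the patch site [code, code + JUMP_LABEL_NOP_SIZE) overlap the
     * text range [start, end]?  Mirrors the comparison shown above.       */
    static bool addr_conflict(unsigned long code, unsigned long start,
                              unsigned long end)
    {
        return code <= end && code + JUMP_LABEL_NOP_SIZE > start;
    }

    int main(void)
    {
        printf("%d\n", addr_conflict(0x1000, 0x1002, 0x1100)); /* 1: overlaps */
        printf("%d\n", addr_conflict(0x1000, 0x1005, 0x1100)); /* 0: disjoint */
        return 0;
    }
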
Kconfig.preempt
23 "explicit preemption points" to the kernel code. These new
42 all kernel code (that is not executing in a critical section)
49 and a slight runtime overhead to kernel code.
seccomp.c
105 u16 code = ftest->code; in seccomp_check_filter() local
108 switch (code) { in seccomp_check_filter()
110 ftest->code = BPF_LDX | BPF_W | BPF_ABS; in seccomp_check_filter()
116 ftest->code = BPF_LD | BPF_IMM; in seccomp_check_filter()
120 ftest->code = BPF_LDX | BPF_IMM; in seccomp_check_filter()
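
seccomp_check_filter() walks a classic-BPF program and rewrites its load opcodes (ftest->code) so they read from struct seccomp_data rather than a network packet. For context, a hedged userspace sketch of the kind of filter it ends up validating, built with the uapi BPF_STMT() helper; this one only loads the syscall number and then allows everything, a placeholder policy rather than anything taken from the source above:

    #include <stddef.h>
    #include <stdio.h>
    #include <sys/prctl.h>
    #include <linux/filter.h>
    #include <linux/seccomp.h>

    int main(void)
    {
        struct sock_filter insns[] = {
            /* Load seccomp_data->nr; absolute loads like this are what
             * seccomp_check_filter() inspects and, where needed, rewrites. */
            BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, nr)),
            /* Placeholder policy: allow every syscall. */
            BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
        };
        struct sock_fprog prog = {
            .len = sizeof(insns) / sizeof(insns[0]),
            .filter = insns,
        };

        if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
            perror("PR_SET_NO_NEW_PRIVS");
        if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog))
            perror("PR_SET_SECCOMP");
        return 0;
    }
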
exit.c
734 void __noreturn do_exit(long code) in do_exit() argument
759 ptrace_event(PTRACE_EVENT_EXIT, code); in do_exit()
816 acct_collect(code, group_dead); in do_exit()
821 tsk->exit_code = code; in do_exit()
899 void complete_and_exit(struct completion *comp, long code) in complete_and_exit() argument
904 do_exit(code); in complete_and_exit()
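
In the do_exit() hits the status ends up in tsk->exit_code; the same value is what a parent later collects with wait(). A small userspace sketch of that round trip, not kernel code:

    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        pid_t pid = fork();

        if (pid == 0)
            _exit(7); /* child: the kernel carries this value through do_exit() */

        int status = 0;
        if (pid > 0 && waitpid(pid, &status, 0) == pid && WIFEXITED(status))
            printf("child exit code: %d\n", WEXITSTATUS(status)); /* prints 7 */
        return 0;
    }
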
auditsc.c
2410 void __audit_seccomp(unsigned long syscall, long signr, int code) in __audit_seccomp() argument
2420 in_compat_syscall(), KSTK_EIP(current), code); in __audit_seccomp()
module.c
340 void __noreturn __module_put_and_exit(struct module *mod, long code) in __module_put_and_exit() argument
343 do_exit(code); in __module_put_and_exit()
/kernel/gcov/
Kconfig
9 This option enables gcov-based code profiling (e.g. for code coverage
/kernel/irq/
Kconfig
2 # Options selectable by the architecture code
25 # and should not be used in new code. Use irq domains instead.
/kernel/livepatch/
Kconfig
18 to new function code contained in the patch module.
/kernel/power/
Kconfig
162 code. This is helpful when debugging and reporting PM bugs, like
207 This enables code to save the last PM event point across
211 The architecture specific code must provide the extern
225 This enables some cheesy code to save the last PM event point in the
/kernel/debug/
debug_core.c
872 dbg_notify_reboot(struct notifier_block *this, unsigned long code, void *x) in dbg_notify_reboot() argument
887 gdbstub_exit(code); in dbg_notify_reboot()
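
dbg_notify_reboot() is a reboot-notifier callback: it is handed the reboot code and, per the snippet, passes it on to gdbstub_exit(). As a rough sketch of the same registration pattern, here is a minimal, hypothetical module using register_reboot_notifier(); the demo_* names are made up for illustration and this is not code from kernel/debug/:

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/notifier.h>
    #include <linux/printk.h>
    #include <linux/reboot.h>

    /* Log the reboot code (SYS_RESTART, SYS_HALT, SYS_POWER_OFF) at
     * shutdown time, the same hook point dbg_notify_reboot() uses. */
    static int demo_notify_reboot(struct notifier_block *this,
                                  unsigned long code, void *x)
    {
        pr_info("demo: reboot notifier called, code=%lu\n", code);
        return NOTIFY_DONE;
    }

    static struct notifier_block demo_reboot_nb = {
        .notifier_call = demo_notify_reboot,
    };

    static int __init demo_init(void)
    {
        return register_reboot_notifier(&demo_reboot_nb);
    }

    static void __exit demo_exit(void)
    {
        unregister_reboot_notifier(&demo_reboot_nb);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
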