/kernel/bpf/
core.c
    160  if (i < pos && i + insn->off + 1 > pos)  in bpf_adj_branches()
    161  insn->off += delta;  in bpf_adj_branches()
    162  else if (i > pos + delta && i + insn->off + 1 <= pos + delta)  in bpf_adj_branches()
    163  insn->off -= delta;  in bpf_adj_branches()
    167  struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,  in bpf_patch_insn_single() argument
    175  memcpy(prog->insnsi + off, patch, sizeof(*patch));  in bpf_patch_insn_single()
    200  insn_rest = insn_adj_cnt - off - len;  in bpf_patch_insn_single()
    202  memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,  in bpf_patch_insn_single()
    204  memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);  in bpf_patch_insn_single()
    206  bpf_adj_branches(prog_adj, off, insn_delta);  in bpf_patch_insn_single()
    [all …]
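The bpf_adj_branches() lines above rebase relative jump offsets after bpf_patch_insn_single() replaces the instruction at index pos with a longer sequence (delta extra instructions). A stand-alone sketch of the rebasing rule visible in those lines; the one-field instruction struct, the names, and the example program are assumptions, and the real helper also skips non-jump opcodes:

    #include <stdint.h>

    /* Hypothetical, simplified instruction: only the relative branch offset. */
    struct insn { int16_t off; };

    /*
     * After 'delta' extra instructions appear right after index 'pos', fix up
     * every branch that crosses the patched region (same conditions as the
     * core.c hits above): targets <= pos keep their index, targets > pos moved.
     */
    static void adj_branches(struct insn *insnsi, int cnt, int pos, int delta)
    {
        for (int i = 0; i < cnt; i++) {
            struct insn *insn = &insnsi[i];

            if (i < pos && i + insn->off + 1 > pos)
                insn->off += delta;   /* branch from before; target moved forward */
            else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
                insn->off -= delta;   /* branch from after; target stayed put */
        }
    }

    int main(void)
    {
        /* Index 1 was expanded into three instructions (delta = 2). */
        struct insn prog[] = {
            { .off = 1 },                              /* jumped to old index 2 */
            { .off = 0 }, { .off = 0 }, { .off = 0 },
            { .off = -2 },                             /* jumped back to index 1 */
        };

        adj_branches(prog, 5, 1, 2);
        /* prog[0].off == 3 (new index 4); prog[4].off == -4 (still index 1). */
        return 0;
    }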
verifier.c
    352  insn->off, insn->src_reg);  in print_bpf_insn()
    357  insn->dst_reg, insn->off,  in print_bpf_insn()
    370  insn->off, insn->imm);  in print_bpf_insn()
    379  insn->src_reg, insn->off);  in print_bpf_insn()
    415  insn->code, insn->off);  in print_bpf_insn()
    422  insn->src_reg, insn->off);  in print_bpf_insn()
    427  insn->imm, insn->off);  in print_bpf_insn()
    574  struct verifier_state *state, int off,  in check_stack_write() argument
    577  int i, spi = (MAX_BPF_STACK + off) / BPF_REG_SIZE;  in check_stack_write()
    595  if (state->stack_slot_type[MAX_BPF_STACK + off + i] == STACK_MISC &&  in check_stack_write()
    [all …]
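In the check_stack_write() hits, the negative, frame-pointer-relative offset is rebased into a spill-slot index with (MAX_BPF_STACK + off) / BPF_REG_SIZE. A small worked example; the constants 512 and 8 are taken here as assumptions for those macros:

    #include <stdio.h>

    #define MAX_BPF_STACK 512   /* assumed stack size     */
    #define BPF_REG_SIZE  8     /* assumed register width */

    /* Negative stack offsets map to slot indexes in [0, MAX_BPF_STACK / BPF_REG_SIZE). */
    static int stack_slot_index(int off)
    {
        return (MAX_BPF_STACK + off) / BPF_REG_SIZE;
    }

    int main(void)
    {
        printf("%d\n", stack_slot_index(-8));    /* 63: topmost slot */
        printf("%d\n", stack_slot_index(-512));  /* 0: lowest slot   */
        return 0;
    }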
/kernel/
kallsyms.c
    91   static unsigned int kallsyms_expand_symbol(unsigned int off,  in kallsyms_expand_symbol() argument
    98   data = &kallsyms_names[off];  in kallsyms_expand_symbol()
    106  off += len + 1;  in kallsyms_expand_symbol()
    135  return off;  in kallsyms_expand_symbol()
    142  static char kallsyms_get_symbol_type(unsigned int off)  in kallsyms_get_symbol_type() argument
    148  return kallsyms_token_table[kallsyms_token_index[kallsyms_names[off + 1]]];  in kallsyms_get_symbol_type()
    184  unsigned int off;  in kallsyms_lookup_name() local
    186  for (i = 0, off = 0; i < kallsyms_num_syms; i++) {  in kallsyms_lookup_name()
    187  off = kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));  in kallsyms_lookup_name()
    202  unsigned int off;  in kallsyms_on_each_symbol() local
    [all …]
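The kallsyms_expand_symbol() hits walk a token-compressed name table: each entry is a length byte followed by that many token indexes, which is why the caller advances with off += len + 1 and why kallsyms_get_symbol_type() can peek at off + 1. A toy model of that layout; the token table and the sample data are invented:

    #include <stdio.h>
    #include <string.h>

    /* Invented token table and compressed data: <len><token indexes>... */
    static const char *token_table[256] = { [0] = "read", [1] = "_page", [2] = "t" };
    static const unsigned char names[]  = { 2, 0, 1,      /* "read_page"  */
                                            3, 0, 1, 2 }; /* "read_paget" */

    static unsigned int expand_symbol(unsigned int off, char *buf)
    {
        unsigned int len = names[off];
        const unsigned char *data = &names[off + 1];

        buf[0] = '\0';
        for (unsigned int i = 0; i < len; i++)
            strcat(buf, token_table[data[i]]);   /* buf assumed large enough */

        return off + len + 1;                    /* next entry: skip length byte + tokens */
    }

    int main(void)
    {
        char buf[64];
        unsigned int off = 0;

        off = expand_symbol(off, buf);
        puts(buf);                               /* read_page  */
        off = expand_symbol(off, buf);
        puts(buf);                               /* read_paget */
        return 0;
    }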
kcov.c
    274  unsigned long size, off;  in kcov_mmap() local
    292  for (off = 0; off < size; off += PAGE_SIZE) {  in kcov_mmap()
    293  page = vmalloc_to_page(kcov->area + off);  in kcov_mmap()
    294  if (vm_insert_page(vma, vma->vm_start + off, page))  in kcov_mmap()
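kcov_mmap() maps a vmalloc'ed coverage buffer into user space one page at a time, because a vmalloc area is only virtually contiguous. A kernel-side sketch of that loop alone; the real function's locking, size validation, and exact error handling are omitted, and the function name here is made up:

    #include <linux/errno.h>
    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    /* Insert each page of a vmalloc'ed area into the user VMA individually. */
    static int map_vmalloc_area(struct vm_area_struct *vma, void *area,
                                unsigned long size)
    {
        unsigned long off;
        struct page *page;

        for (off = 0; off < size; off += PAGE_SIZE) {
            page = vmalloc_to_page(area + off);
            if (vm_insert_page(vma, vma->vm_start + off, page))
                return -EAGAIN;
        }
        return 0;
    }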
params.c
    510  int i, off, ret;  in param_array_get() local
    514  for (i = off = 0; i < (arr->num ? *arr->num : arr->max); i++) {  in param_array_get()
    516  buffer[off++] = ',';  in param_array_get()
    519  ret = arr->ops->get(buffer + off, &p);  in param_array_get()
    522  off += ret;  in param_array_get()
    524  buffer[off] = '\0';  in param_array_get()
    525  return off;  in param_array_get()
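param_array_get() builds a comma-separated string by letting each element's formatter write at buffer + off and bumping off by the returned length. A user-space sketch of the same accumulation pattern, with an int array and sprintf standing in for the per-type ops->get() callback:

    #include <stdio.h>

    /* Format n ints as "a,b,c", returning the total length written. */
    static int format_array(char *buffer, const int *vals, int n)
    {
        int i, off = 0;

        for (i = 0; i < n; i++) {
            if (i)
                buffer[off++] = ',';
            off += sprintf(buffer + off, "%d", vals[i]);  /* element formatter */
        }
        buffer[off] = '\0';
        return off;
    }

    int main(void)
    {
        char buf[64];
        int vals[] = { 1, 2, 3 };

        format_array(buf, vals, 3);
        puts(buf);                    /* prints "1,2,3" */
        return 0;
    }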
pid.c
    56   struct pidmap *map, int off)  in mk_pid() argument
    58   return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;  in mk_pid()
    61   #define find_next_offset(map, off) \  argument
    62   find_next_zero_bit((map)->page, BITS_PER_PAGE, off)
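mk_pid() converts a (bitmap page, bit offset) pair back into a pid: the page's index within the pidmap array scales by BITS_PER_PAGE and the bit offset is added. A tiny worked example, assuming 4 KiB pages so BITS_PER_PAGE is 32768:

    #include <stdio.h>

    #define BITS_PER_PAGE 32768   /* PAGE_SIZE * 8, assuming 4 KiB pages */

    /* pid = index of the bitmap page * bits per page + bit offset within it */
    static int mk_pid(int page_index, int off)
    {
        return page_index * BITS_PER_PAGE + off;
    }

    int main(void)
    {
        printf("%d\n", mk_pid(0, 300));  /* 300   */
        printf("%d\n", mk_pid(1, 5));    /* 32773 */
        return 0;
    }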
ksysfs.c
    173  char *buf, loff_t off, size_t count)  in notes_read() argument
    175  memcpy(buf, &__start_notes + off, count);  in notes_read()
ptrace.c
    701  if (arg.off > ULONG_MAX)  in ptrace_peek_siginfo()
    711  unsigned long off = arg.off + i;  in ptrace_peek_siginfo() local
    716  if (!off--) {  in ptrace_peek_siginfo()
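The `if (!off--)` line in ptrace_peek_siginfo() is a counting idiom: while walking the pending-signal list it decrements off per entry and fires exactly on the off-th one. A minimal illustration, with an array standing in for the queue:

    #include <stdio.h>

    /* Return the off-th (0-based) element, or -1 if the list is shorter. */
    static int nth_element(const int *vals, int n, unsigned long off)
    {
        for (int i = 0; i < n; i++) {
            if (!off--)           /* fires once off has counted down to zero */
                return vals[i];
        }
        return -1;
    }

    int main(void)
    {
        int queue[] = { 10, 20, 30, 40 };

        printf("%d\n", nth_element(queue, 4, 2));   /* prints 30 */
        return 0;
    }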
cgroup_pids.c
    241  size_t nbytes, loff_t off)  in pids_max_write() argument
cgroup.c
    2731  size_t nbytes, loff_t off, bool threadgroup)  in __cgroup_procs_write() argument
    2825  char *buf, size_t nbytes, loff_t off)  in cgroup_tasks_write() argument
    2827  return __cgroup_procs_write(of, buf, nbytes, off, false);  in cgroup_tasks_write()
    2831  char *buf, size_t nbytes, loff_t off)  in cgroup_procs_write() argument
    2833  return __cgroup_procs_write(of, buf, nbytes, off, true);  in cgroup_procs_write()
    2837  char *buf, size_t nbytes, loff_t off)  in cgroup_release_agent_write() argument
    2980  loff_t off)  in cgroup_subtree_control_write() argument
    3221  size_t nbytes, loff_t off)  in cgroup_file_write() argument
    3229  return cft->write(of, buf, nbytes, off);  in cgroup_file_write()
cgroup_freezer.c
    420  char *buf, size_t nbytes, loff_t off)  in freezer_write() argument
cpuset.c
    1692  char *buf, size_t nbytes, loff_t off)  in cpuset_write_resmask() argument
/kernel/power/
swap.c
    671   size_t off;  in save_image_lzo() local
    767   for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {  in save_image_lzo()
    775   memcpy(data[thr].unc + off,  in save_image_lzo()
    785   if (!off)  in save_image_lzo()
    788   data[thr].unc_len = off;  in save_image_lzo()
    832   for (off = 0;  in save_image_lzo()
    833   off < LZO_HEADER + data[thr].cmp_len;  in save_image_lzo()
    834   off += PAGE_SIZE) {  in save_image_lzo()
    835   memcpy(page, data[thr].cmp + off, PAGE_SIZE);  in save_image_lzo()
    1166  size_t off;  in load_image_lzo() local
    [all …]
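In save_image_lzo(), off does double duty: it walks the per-thread buffer in PAGE_SIZE steps while gathering snapshot pages, and its final value becomes unc_len, the byte count handed to the compressor. A stripped-down sketch of only that gathering step; the buffer size and the data source are assumptions:

    #include <string.h>

    #define PAGE_SIZE 4096
    #define UNC_SIZE  (32 * PAGE_SIZE)   /* stand-in for LZO_UNC_SIZE */

    /* Gather page-sized chunks until the buffer is full or input runs out;
     * the returned 'off' plays the role of unc_len in the hits above. */
    static size_t gather_pages(unsigned char *unc, const unsigned char *src,
                               size_t src_len)
    {
        size_t off;

        for (off = 0; off < UNC_SIZE && off + PAGE_SIZE <= src_len; off += PAGE_SIZE)
            memcpy(unc + off, src + off, PAGE_SIZE);

        return off;
    }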
Kconfig
    17   This allows you to turn off the freezer for suspend. If this is
    54   system and powers it off; and restores that checkpoint on reboot.
    264  manpage ("man 8 hdparm") for that), and it doesn't turn off
/kernel/trace/
bpf_trace.c
    308  static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type)  in kprobe_prog_is_valid_access() argument
    311  if (off < 0 || off >= sizeof(struct pt_regs))  in kprobe_prog_is_valid_access()
    319  if (off % size != 0)  in kprobe_prog_is_valid_access()
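kprobe_prog_is_valid_access() rejects any context access whose offset lies outside struct pt_regs or is not aligned to the access size. A stand-alone sketch of those two checks, with a placeholder struct in place of pt_regs:

    #include <stdbool.h>

    /* Placeholder context; only its size matters for the bounds check. */
    struct ctx { unsigned long regs[21]; };

    /* Offset must fall inside the context and be aligned to the access size. */
    static bool ctx_access_ok(int off, int size)
    {
        if (off < 0 || off >= (int)sizeof(struct ctx))
            return false;
        if (off % size != 0)
            return false;
        return true;
    }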
Kconfig
    180  bool "Interrupts-off Latency Tracer"
    191  This option measures the time spent in irqs-off critical
    201  enabled. This option and the preempt-off timing option can be
    205  bool "Preemption-off Latency Tracer"
    215  This option measures the time spent in preemption-off critical
    225  enabled. This option and the irqs-off timing option can be
    559  When the tracepoint is enabled, it kicks off a kernel thread that
    606  kernel boot sequence, the test will start that kicks off
/kernel/gcov/
gcc_4_7.c
    382  static size_t store_gcov_u32(void *buffer, size_t off, u32 v)  in store_gcov_u32() argument
    387  data = buffer + off;  in store_gcov_u32()
    406  static size_t store_gcov_u64(void *buffer, size_t off, u64 v)  in store_gcov_u64() argument
    411  data = buffer + off;  in store_gcov_u64()
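The store_gcov_u32()/store_gcov_u64() hits write a value at buffer + off and return a byte count, so callers can accumulate off record by record. A user-space sketch of that convention; the NULL-buffer sizing pass and the low-word-first split of 64-bit values are assumptions here, not guaranteed details of the file:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Write v at buffer + off (when a buffer was given) and report the size. */
    static size_t store_u32(void *buffer, size_t off, uint32_t v)
    {
        if (buffer)
            memcpy((char *)buffer + off, &v, sizeof(v));
        return sizeof(v);
    }

    static size_t store_u64(void *buffer, size_t off, uint64_t v)
    {
        off += store_u32(buffer, off, (uint32_t)(v & 0xffffffffu));
        store_u32(buffer, off, (uint32_t)(v >> 32));
        return 2 * sizeof(uint32_t);
    }

    int main(void)
    {
        char buf[64];
        size_t pos = 0;

        pos += store_u32(buf, pos, 0xdeadbeef);   /* made-up record fields */
        pos += store_u64(buf, pos, 123456789ULL);
        printf("%zu bytes\n", pos);               /* 12 bytes */
        return 0;
    }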
/kernel/debug/kdb/
kdb_main.c
    497  unsigned long off = 0;  in kdbgetaddrarg() local
    610  diag = kdbgetularg(cp, &off);  in kdbgetaddrarg()
    615  off = -off;  in kdbgetaddrarg()
    618  *offset += off;  in kdbgetaddrarg()
    621  *value += off;  in kdbgetaddrarg()
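kdbgetaddrarg() accepts address arguments like symbol+0x18 or symbol-8: the numeric part is parsed into off as an unsigned long, negated for '-', and then added to both the resolved value and the reported offset. A simplified user-space parser showing the same flow, with the symbol lookup replaced by a caller-supplied base address:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Apply an optional "+off" / "-off" suffix to a resolved base address. */
    static void parse_addr(const char *arg, unsigned long base,
                           unsigned long *value, unsigned long *offset)
    {
        const char *p = strpbrk(arg, "+-");
        unsigned long off;

        *value = base;
        *offset = 0;

        if (p) {
            off = strtoul(p + 1, NULL, 0);
            if (*p == '-')
                off = -off;        /* unsigned negation: subtract by wrapping */
            *offset += off;
            *value += off;
        }
    }

    int main(void)
    {
        unsigned long value, offset;

        parse_addr("panic+0x18", 0xc1000000UL, &value, &offset);
        printf("%#lx (+%#lx)\n", value, offset);   /* 0xc1000018 (+0x18) */
        return 0;
    }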
/kernel/irq/
Kconfig
    33   # Support for generic irq migrating off cpu before the cpu is offline.
/kernel/time/
timeconst.bc
    29   a shift value will be correct in the signed integer range and off
Kconfig
    113  dynamically off.