/kernel/bpf/

tnum.c
      12  #define TNUM(_v, _m) (struct tnum){.value = _v, .mask = _m}
      14  const struct tnum tnum_unknown = { .value = 0, .mask = -1 };
      16  struct tnum tnum_const(u64 value)        in tnum_const() argument
      18  return TNUM(value, 0);        in tnum_const()
      39  return TNUM(a.value << shift, a.mask << shift);        in tnum_lshift()
      44  return TNUM(a.value >> shift, a.mask >> shift);        in tnum_rshift()
      55  return TNUM((u32)(((s32)a.value) >> min_shift),        in tnum_arshift()
      58  return TNUM((s64)a.value >> min_shift,        in tnum_arshift()
      67  sv = a.value + b.value;        in tnum_add()
      78  dv = a.value - b.value;        in tnum_sub()
    [all …]

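The tnum.c hits above are the BPF verifier's tristate-number helpers. As a rough illustration only (not the kernel code), here is a minimal userspace sketch of the representation those lines operate on: a value/mask pair in which mask bits are unknown and value supplies the known bits.

    /*
     * Minimal sketch of a tristate number ("tnum"): bits set in .mask are
     * unknown, bits clear in .mask are known and given by .value, so the
     * expected invariant is (value & mask) == 0.
     */
    #include <stdint.h>
    #include <stdio.h>

    struct tnum {
            uint64_t value;                 /* known bits */
            uint64_t mask;                  /* unknown bits */
    };

    #define TNUM(_v, _m) ((struct tnum){ .value = (_v), .mask = (_m) })

    static const struct tnum tnum_unknown = { .value = 0, .mask = ~0ULL };

    static struct tnum tnum_const(uint64_t value)
    {
            return TNUM(value, 0);          /* fully known constant */
    }

    static struct tnum tnum_lshift(struct tnum a, uint8_t shift)
    {
            /* shifting moves known and unknown bits alike */
            return TNUM(a.value << shift, a.mask << shift);
    }

    int main(void)
    {
            struct tnum t = tnum_lshift(tnum_const(5), 2);

            printf("value=%#llx mask=%#llx (unknown mask=%#llx)\n",
                   (unsigned long long)t.value, (unsigned long long)t.mask,
                   (unsigned long long)tnum_unknown.mask);
            return 0;
    }
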
queue_stack_maps.c
      98  static int __queue_map_get(struct bpf_map *map, void *value, bool delete)        in __queue_map_get() argument
     113  memset(value, 0, qs->map.value_size);        in __queue_map_get()
     119  memcpy(value, ptr, qs->map.value_size);        in __queue_map_get()
     132  static int __stack_map_get(struct bpf_map *map, void *value, bool delete)        in __stack_map_get() argument
     148  memset(value, 0, qs->map.value_size);        in __stack_map_get()
     158  memcpy(value, ptr, qs->map.value_size);        in __stack_map_get()
     169  static int queue_map_peek_elem(struct bpf_map *map, void *value)        in queue_map_peek_elem() argument
     171  return __queue_map_get(map, value, false);        in queue_map_peek_elem()
     175  static int stack_map_peek_elem(struct bpf_map *map, void *value)        in stack_map_peek_elem() argument
     177  return __stack_map_get(map, value, false);        in stack_map_peek_elem()
    [all …]

bloom_filter.c
      30  static u32 hash(struct bpf_bloom_filter *bloom, void *value,        in hash() argument
      36  h = jhash2(value, bloom->aligned_u32_count,        in hash()
      39  h = jhash(value, value_size, bloom->hash_seed + index);        in hash()
      44  static int bloom_map_peek_elem(struct bpf_map *map, void *value)        in bloom_map_peek_elem() argument
      51  h = hash(bloom, value, map->value_size, i);        in bloom_map_peek_elem()
      59  static int bloom_map_push_elem(struct bpf_map *map, void *value, u64 flags)        in bloom_map_push_elem() argument
      69  h = hash(bloom, value, map->value_size, i);        in bloom_map_push_elem()
      76  static int bloom_map_pop_elem(struct bpf_map *map, void *value)        in bloom_map_pop_elem() argument
      81  static int bloom_map_delete_elem(struct bpf_map *map, void *value)        in bloom_map_delete_elem() argument
     181  void *value, u64 flags)        in bloom_map_update_elem() argument

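The bloom_filter.c lines show the map's push and peek paths hashing the value once per configured hash function (hash_seed + index) and then setting or testing one bit per hash. A toy userspace sketch of that scheme follows; fnv1a() is a stand-in for the kernel's jhash()/jhash2(), and the bit-array size is arbitrary.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NR_BITS         (1u << 16)      /* arbitrary power-of-two bitset */
    #define NR_HASHES       3u              /* number of hash functions */

    static uint8_t bits[NR_BITS / 8];

    /* stand-in hash; the kernel uses jhash()/jhash2() seeded per index */
    static uint32_t fnv1a(const void *data, size_t len, uint32_t seed)
    {
            const uint8_t *p = data;
            uint32_t h = 2166136261u ^ seed;

            while (len--)
                    h = (h ^ *p++) * 16777619u;
            return h;
    }

    static void bloom_push(const void *value, size_t size)
    {
            for (uint32_t i = 0; i < NR_HASHES; i++) {
                    uint32_t h = fnv1a(value, size, i) & (NR_BITS - 1);

                    bits[h / 8] |= 1u << (h % 8);
            }
    }

    /* true: value may be present; false: value is definitely absent */
    static bool bloom_peek(const void *value, size_t size)
    {
            for (uint32_t i = 0; i < NR_HASHES; i++) {
                    uint32_t h = fnv1a(value, size, i) & (NR_BITS - 1);

                    if (!(bits[h / 8] & (1u << (h % 8))))
                            return false;
            }
            return true;
    }

    int main(void)
    {
            int key = 42, other = 7;

            bloom_push(&key, sizeof(key));
            printf("42: %d, 7: %d\n", bloom_peek(&key, sizeof(key)),
                   bloom_peek(&other, sizeof(other)));
            return 0;
    }
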
hashtab.c
     926  void *value, bool onallcpus)        in pcpu_copy_value() argument
     930  memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);        in pcpu_copy_value()
     937  value + off, size);        in pcpu_copy_value()
     944  void *value, bool onallcpus)        in pcpu_init_value() argument
     958  bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value,        in pcpu_init_value()
     964  pcpu_copy_value(htab, pptr, value, onallcpus);        in pcpu_init_value()
     975  void *value, u32 key_size, u32 hash,        in alloc_htab_elem() argument
    1034  pcpu_init_value(htab, pptr, value, onallcpus);        in alloc_htab_elem()
    1040  memcpy(l_new->key + round_up(key_size, 8), value, size);        in alloc_htab_elem()
    1044  value);        in alloc_htab_elem()
    [all …]

arraymap.c
     136  - offsetof(struct bpf_array, value);        in array_map_alloc()
     159  return array->value + (u64)array->elem_size * index;        in array_map_elem_ptr()
     171  return array->value + (u64)array->elem_size * (index & array->index_mask);        in array_map_lookup_elem()
     184  *imm = (unsigned long)array->value;        in array_map_direct_value_addr()
     192  u64 base = (unsigned long)array->value;        in array_map_direct_value_meta()
     217  *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));        in array_map_gen_lookup()
     263  int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)        in bpf_percpu_array_copy() argument
     282  copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));        in bpf_percpu_array_copy()
     283  check_and_init_map_value(map, value + off);        in bpf_percpu_array_copy()
     318  static int array_map_update_elem(struct bpf_map *map, void *key, void *value,        in array_map_update_elem() argument
    [all …]

bpf_struct_ops.c
     247  void *value)        in bpf_struct_ops_map_sys_lookup_elem() argument
     260  memset(value, 0, map->value_size);        in bpf_struct_ops_map_sys_lookup_elem()
     267  uvalue = value;        in bpf_struct_ops_map_sys_lookup_elem()
     353  void *value, u64 flags)        in bpf_struct_ops_map_update_elem() argument
     372  err = check_zero_holes(st_ops->value_type, value);        in bpf_struct_ops_map_update_elem()
     376  uvalue = value;        in bpf_struct_ops_map_update_elem()
     398  memcpy(uvalue, value, map->value_size);        in bpf_struct_ops_map_update_elem()
     557  void *value;        in bpf_struct_ops_map_seq_show_elem() local
     560  value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);        in bpf_struct_ops_map_seq_show_elem()
     561  if (!value)        in bpf_struct_ops_map_seq_show_elem()
    [all …]

bpf_local_storage.c
      76  void *value, bool charge_mem, gfp_t gfp_flags)        in bpf_selem_alloc() argument
      86  if (value)        in bpf_selem_alloc()
      87  copy_map_value(&smap->map, SDATA(selem)->data, value);        in bpf_selem_alloc()
     373  void *value, u64 map_flags, gfp_t gfp_flags)        in bpf_local_storage_update() argument
     399  selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);        in bpf_local_storage_update()
     425  value, false);        in bpf_local_storage_update()
     431  selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);        in bpf_local_storage_update()
     455  copy_map_value_locked(&smap->map, old_sdata->data, value,        in bpf_local_storage_update()
     471  selem = bpf_selem_alloc(smap, owner, value, !old_sdata, gfp_flags);        in bpf_local_storage_update()

syscall.c
     181  void *value, __u64 flags)        in bpf_map_update_value() argument
     187  return bpf_map_offload_update_elem(map, key, value, flags);        in bpf_map_update_value()
     190  return map->ops->map_update_elem(map, key, value, flags);        in bpf_map_update_value()
     193  return sock_map_update_elem_sys(map, key, value, flags);        in bpf_map_update_value()
     195  return bpf_fd_array_map_update_elem(map, f.file, key, value,        in bpf_map_update_value()
     202  err = bpf_percpu_hash_update(map, key, value, flags);        in bpf_map_update_value()
     204  err = bpf_percpu_array_update(map, key, value, flags);        in bpf_map_update_value()
     206  err = bpf_percpu_cgroup_storage_update(map, key, value,        in bpf_map_update_value()
     210  err = bpf_fd_array_map_update_elem(map, f.file, key, value,        in bpf_map_update_value()
     215  err = bpf_fd_htab_map_update_elem(map, f.file, key, value,        in bpf_map_update_value()
    [all …]

cpumap.c
      68  struct bpf_cpumap_val value;        member
     424  rcpu->value.bpf_prog.id = prog->aux->id;        in __cpu_map_load_bpf_program()
     431  __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,        in __cpu_map_entry_alloc() argument
     434  int numa, err, i, fd = value->bpf_prog.fd;        in __cpu_map_entry_alloc()
     463  err = ptr_ring_init(rcpu->queue, value->qsize, gfp);        in __cpu_map_entry_alloc()
     469  rcpu->value.qsize = value->qsize;        in __cpu_map_entry_alloc()
     572  static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,        in cpu_map_update_elem() argument
     581  memcpy(&cpumap_value, value, map->value_size);        in cpu_map_update_elem()
     666  return rcpu ? &rcpu->value : NULL;        in cpu_map_lookup_elem()

reuseport_array.c
     169  void *value)        in bpf_fd_reuseport_array_lookup_elem() argument
     180  *(u64 *)value = __sock_gen_cookie(sk);        in bpf_fd_reuseport_array_lookup_elem()
     236  void *value, u64 map_flags)        in bpf_fd_reuseport_array_update_elem() argument
     253  u64 fd64 = *(u64 *)value;        in bpf_fd_reuseport_array_update_elem()
     259  fd = *(int *)value;        in bpf_fd_reuseport_array_update_elem()

helpers.c
      51  void *, value, u64, flags)        in BPF_CALL_4() argument
      55  return map->ops->map_update_elem(map, key, value, flags);        in BPF_CALL_4()
      85  BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)        in BPF_CALL_3() argument
      87  return map->ops->map_push_elem(map, value, flags);        in BPF_CALL_3()
     100  BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)        in BPF_CALL_2() argument
     102  return map->ops->map_pop_elem(map, value);        in BPF_CALL_2()
     113  BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)        in BPF_CALL_2() argument
     115  return map->ops->map_peek_elem(map, value);        in BPF_CALL_2()
    1093  void *value;        member
    1113  void *value = t->value;        in bpf_timer_cb() local
    [all …]

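The helpers.c hits are generic map helpers that do nothing map-specific themselves; they simply forward to the per-map-type ops table. A tiny sketch of that function-pointer dispatch pattern, with illustrative types and names rather than the kernel's:

    #include <stdio.h>

    struct toy_map;

    struct toy_map_ops {
            int (*push_elem)(struct toy_map *map, void *value, unsigned long flags);
    };

    struct toy_map {
            const struct toy_map_ops *ops;
    };

    /* generic helper: pure dispatch to the map type's implementation */
    static int toy_map_push_elem(struct toy_map *map, void *value, unsigned long flags)
    {
            return map->ops->push_elem(map, value, flags);
    }

    /* one concrete map type */
    static int queue_push(struct toy_map *map, void *value, unsigned long flags)
    {
            (void)map;
            (void)flags;
            printf("queue push %d\n", *(int *)value);
            return 0;
    }

    static const struct toy_map_ops queue_ops = { .push_elem = queue_push };

    int main(void)
    {
            struct toy_map m = { .ops = &queue_ops };
            int v = 5;

            return toy_map_push_elem(&m, &v, 0);
    }
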
bpf_inode_storage.c
     122  void *value, u64 map_flags)        in bpf_fd_inode_storage_update_elem() argument
     139  value, map_flags, GFP_ATOMIC);        in bpf_fd_inode_storage_update_elem()
     174  void *, value, u64, flags, gfp_t, gfp_flags)        in BPF_CALL_5() argument
     199  inode, (struct bpf_local_storage_map *)map, value,        in BPF_CALL_5()

local_storage.c
     145  void *value, u64 flags)        in cgroup_storage_update_elem() argument
     163  copy_map_value_locked(map, storage->buf->data, value, false);        in cgroup_storage_update_elem()
     173  memcpy(&new->data[0], value, map->value_size);        in cgroup_storage_update_elem()
     183  void *value)        in bpf_percpu_cgroup_storage_copy() argument
     203  bpf_long_memcpy(value + off,        in bpf_percpu_cgroup_storage_copy()
     212  void *value, u64 map_flags)        in bpf_percpu_cgroup_storage_update() argument
     238  value + off, size);        in bpf_percpu_cgroup_storage_update()

bpf_task_storage.c
     152  void *value, u64 map_flags)        in bpf_pid_task_storage_update_elem() argument
     177  task, (struct bpf_local_storage_map *)map, value, map_flags,        in bpf_pid_task_storage_update_elem()
     232  task, void *, value, u64, flags, gfp_t, gfp_flags)        in BPF_CALL_5() argument
     254  task, (struct bpf_local_storage_map *)map, value,        in BPF_CALL_5()

/kernel/time/

itimer.c
      48  struct itimerspec64 *const value)        in get_cpu_itimer() argument
      72  value->it_value = ns_to_timespec64(val);        in get_cpu_itimer()
      73  value->it_interval = ns_to_timespec64(interval);        in get_cpu_itimer()
      76  static int do_getitimer(int which, struct itimerspec64 *value)        in do_getitimer() argument
      83  value->it_value = itimer_get_remtime(&tsk->signal->real_timer);        in do_getitimer()
      84  value->it_interval =        in do_getitimer()
      89  get_cpu_itimer(tsk, CPUCLOCK_VIRT, value);        in do_getitimer()
      92  get_cpu_itimer(tsk, CPUCLOCK_PROF, value);        in do_getitimer()
     113  SYSCALL_DEFINE2(getitimer, int, which, struct __kernel_old_itimerval __user *, value)        in SYSCALL_DEFINE2() argument
     118  if (!error && put_itimerval(value, &get_buffer))        in SYSCALL_DEFINE2()
    [all …]

timeconst.bc
      20  /* Adjustment factor when a ceiling value is used. Use as:
      30  which brings the mul value into the range 2^b-1 <= x < 2^b. Such
      31  a shift value will be correct in the signed integer range and off
      55  print "#error \qinclude/generated/timeconst.h has the wrong HZ value!\q\n"
      59  print "#error Totally bogus HZ value!\n"

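timeconst.bc generates the multiply/shift constants in include/generated/timeconst.h so that HZ/msec/usec conversions avoid runtime division. The toy check below only illustrates the underlying idea, replacing division by a constant d with a multiply by ceil(2^s / d) and a right shift; the real shift widths, rounding and adjustment terms come from the bc script, not from this sketch.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint32_t d = 10;                  /* e.g. msec per tick at HZ=100 */
            const unsigned int s = 32;
            const uint64_t mul = ((1ULL << s) + d - 1) / d;  /* ceil(2^32 / 10) */

            /* brute-force check of the identity over a bounded input range */
            for (uint32_t x = 0; x < 1000000; x++) {
                    if ((x * mul) >> s != x / d) {
                            printf("mismatch at %u\n", x);
                            return 1;
                    }
            }
            printf("multiply+shift matches x/%u for all tested x\n", d);
            return 0;
    }
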
/kernel/power/

qos.c
      77  static void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)        in pm_qos_set_value() argument
      79  WRITE_ONCE(c->target_value, value);        in pm_qos_set_value()
     100  enum pm_qos_req_action action, int value)        in pm_qos_update_target() argument
     108  if (value == PM_QOS_DEFAULT_VALUE)        in pm_qos_update_target()
     111  new_value = value;        in pm_qos_update_target()
     246  enum pm_qos_req_action action, s32 value)        in cpu_latency_qos_apply() argument
     248  int ret = pm_qos_update_target(req->qos, &req->node, action, value);        in cpu_latency_qos_apply()
     265  void cpu_latency_qos_add_request(struct pm_qos_request *req, s32 value)        in cpu_latency_qos_add_request() argument
     275  trace_pm_qos_add_request(value);        in cpu_latency_qos_add_request()
     278  cpu_latency_qos_apply(req, PM_QOS_ADD_REQ, value);        in cpu_latency_qos_add_request()
    [all …]

suspend_test.c
     150  static int __init setup_test_suspend(char *value)        in setup_test_suspend() argument
     157  value++;        in setup_test_suspend()
     158  suspend_type = strsep(&value, ",");        in setup_test_suspend()
     162  repeat = strsep(&value, ",");        in setup_test_suspend()

/kernel/

kexec_elf.c
      29  static uint64_t elf64_to_cpu(const struct elfhdr *ehdr, uint64_t value)        in elf64_to_cpu() argument
      32  value = le64_to_cpu(value);        in elf64_to_cpu()
      34  value = be64_to_cpu(value);        in elf64_to_cpu()
      36  return value;        in elf64_to_cpu()
      39  static uint32_t elf32_to_cpu(const struct elfhdr *ehdr, uint32_t value)        in elf32_to_cpu() argument
      42  value = le32_to_cpu(value);        in elf32_to_cpu()
      44  value = be32_to_cpu(value);        in elf32_to_cpu()
      46  return value;        in elf32_to_cpu()
      49  static uint16_t elf16_to_cpu(const struct elfhdr *ehdr, uint16_t value)        in elf16_to_cpu() argument
      52  value = le16_to_cpu(value);        in elf16_to_cpu()
    [all …]

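kexec_elf.c converts each ELF header field from the file's byte order (named by e_ident[EI_DATA]) to host order before use. A small userspace sketch of the same idea, assembling the value from bytes instead of calling the kernel's le64_to_cpu()/be64_to_cpu():

    #include <elf.h>        /* EI_DATA, ELFDATA2LSB, ELFDATA2MSB */
    #include <stdint.h>
    #include <stdio.h>

    /* convert an 8-byte ELF field to host order given e_ident[EI_DATA] */
    static uint64_t elf64_to_host(unsigned char ei_data, const unsigned char *field)
    {
            uint64_t value = 0;

            for (int i = 0; i < 8; i++) {
                    int byte = (ei_data == ELFDATA2LSB) ? i : 7 - i;

                    value |= (uint64_t)field[byte] << (8 * i);
            }
            return value;
    }

    int main(void)
    {
            /* 0x0102030405060708 stored big-endian */
            unsigned char raw[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

            printf("%#llx\n", (unsigned long long)elf64_to_host(ELFDATA2MSB, raw));
            return 0;
    }
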
acct.c
     334  static comp_t encode_comp_t(unsigned long value)        in encode_comp_t() argument
     339  while (value > MAXFRACT) {        in encode_comp_t()
     340  rnd = value & (1 << (EXPSIZE - 1)); /* Round up? */        in encode_comp_t()
     341  value >>= EXPSIZE; /* Base 8 exponent == 3 bit shift. */        in encode_comp_t()
     348  if (rnd && (++value > MAXFRACT)) {        in encode_comp_t()
     349  value >>= EXPSIZE;        in encode_comp_t()
     359  exp += value; /* and add on the mantissa. */        in encode_comp_t()
     378  static comp2_t encode_comp2_t(u64 value)        in encode_comp2_t() argument
     382  exp = (value > (MAXFRACT2>>1));        in encode_comp2_t()
     384  while (value > MAXFRACT2) {        in encode_comp2_t()
    [all …]

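encode_comp_t() packs an arbitrary count into comp_t, a 16-bit pseudo floating-point format with a base-8 exponent and a mantissa. The sketch below mirrors the rounding logic visible in the snippet; the MANTSIZE/EXPSIZE/MAXFRACT values are the conventional acct(5) layout and are assumed here rather than copied from the file.

    #include <stdint.h>
    #include <stdio.h>

    #define MANTSIZE        13                      /* mantissa bits */
    #define EXPSIZE         3                       /* base-8 exponent bits */
    #define MAXFRACT        ((1 << MANTSIZE) - 1)   /* largest mantissa */

    static uint16_t encode_comp(unsigned long value)
    {
            int exp = 0, rnd = 0;

            while (value > MAXFRACT) {
                    rnd = value & (1 << (EXPSIZE - 1));     /* round up? */
                    value >>= EXPSIZE;      /* base-8 exponent == 3-bit shift */
                    exp++;
            }
            /* apply rounding, handling mantissa overflow */
            if (rnd && (++value > MAXFRACT)) {
                    value >>= EXPSIZE;
                    exp++;
            }
            return (uint16_t)((exp << MANTSIZE) | value);
    }

    static unsigned long decode_comp(uint16_t c)
    {
            return (unsigned long)(c & MAXFRACT) << (EXPSIZE * (c >> MANTSIZE));
    }

    int main(void)
    {
            unsigned long v = 123456;
            uint16_t c = encode_comp(v);

            printf("%lu -> %#x -> %lu\n", v, (unsigned int)c, decode_comp(c));
            return 0;
    }
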
kallsyms.c
     650  unsigned long value;        member
     659  int __weak arch_get_kallsym(unsigned int symnum, unsigned long *value,        in arch_get_kallsym() argument
     668  &iter->value, &iter->type,        in get_ksymbol_arch()
     682  &iter->value, &iter->type,        in get_ksymbol_mod()
     701  &iter->value, &iter->type,        in get_ksymbol_ftrace_mod()
     719  &iter->value, &iter->type,        in get_ksymbol_bpf()
     739  &iter->value, &iter->type,        in get_ksymbol_kprobe()
     749  iter->value = kallsyms_sym_address(iter->pos);        in get_ksymbol_core()
     838  void *value;        in s_show() local
     845  value = iter->show_value ? (void *)iter->value : NULL;        in s_show()
    [all …]

/kernel/cgroup/

rdma.c
     359  char *name, *value = c;        in parse_resource() local
     363  name = strsep(&value, "=");        in parse_resource()
     364  if (!name || !value)        in parse_resource()
     371  len = strlen(value);        in parse_resource()
     373  argstr.from = value;        in parse_resource()
     374  argstr.to = value + len;        in parse_resource()
     382  if (strncmp(value, RDMACG_MAX_STR, len) == 0) {        in parse_resource()
     497  u32 value;        in print_rpool_values() local
     506  value = rpool->resources[i].max;        in print_rpool_values()
     508  value = S32_MAX;        in print_rpool_values()
    [all …]

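parse_resource() in rdma.c splits a "name=value" token with strsep() and treats the literal max (RDMACG_MAX_STR) as "no limit". A simplified userspace sketch of that parsing pattern, with hypothetical names and minimal error handling:

    #define _DEFAULT_SOURCE         /* for strsep() on glibc */
    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int parse_resource(char *token, long *out)
    {
            char *value = token;
            char *name = strsep(&value, "=");       /* splits at '=' in place */

            if (!name || !value || !*value)
                    return -1;

            if (strcmp(value, "max") == 0) {        /* "no limit" marker */
                    *out = LONG_MAX;
                    return 0;
            }
            *out = strtol(value, NULL, 10);
            return 0;
    }

    int main(void)
    {
            char buf[] = "hca_handle=max";          /* hypothetical resource */
            long limit;

            if (!parse_resource(buf, &limit))
                    printf("limit = %ld\n", limit);
            return 0;
    }
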
/kernel/module/

procfs.c
      69  void *value;        in m_show() local
      89  value = m->private ? NULL : mod->core_layout.base;        in m_show()
      90  seq_printf(m, " 0x%px", value);        in m_show()

/kernel/debug/kdb/

kdb_main.c
     311  static int kdbgetulenv(const char *match, unsigned long *value)        in kdbgetulenv() argument
     321  *value = simple_strtoul(ep, NULL, 0);        in kdbgetulenv()
     336  int kdbgetintenv(const char *match, int *value)        in kdbgetintenv() argument
     343  *value = (int) val;        in kdbgetintenv()
     414  int kdbgetularg(const char *arg, unsigned long *value)        in kdbgetularg() argument
     431  *value = val;        in kdbgetularg()
     436  int kdbgetu64arg(const char *arg, u64 *value)        in kdbgetu64arg() argument
     450  *value = val;        in kdbgetu64arg()
     544  unsigned long *value, long *offset,        in kdbgetaddrarg() argument
     621  if (value)        in kdbgetaddrarg()
    [all …]

/kernel/trace/

trace_mmiotrace.c
     184  rw->value, rw->pc, 0);        in mmio_print_rw()
     191  rw->value, rw->pc, 0);        in mmio_print_rw()
     199  (rw->value >> 16) & 0xff, (rw->value >> 8) & 0xff,        in mmio_print_rw()
     200  (rw->value >> 0) & 0xff, rw->pc, 0);        in mmio_print_rw()