/kernel/printk/
  printk_ringbuffer.c
    333  #define DESC_ID_PREV_WRAP(desc_ring, id) \  [argument]
    334          DESC_ID((id) - DESCS_COUNT(desc_ring))
    347  unsigned long id;  [member]
    384  size = ALIGN(size, sizeof(db->id));  [in to_blk_size()]
    407  if (size > DATA_SIZE(data_ring) - sizeof(db->id))  [in data_check_size()]
    414  static enum desc_state get_desc_state(unsigned long id,  [in get_desc_state(), argument]
    417  if (id != DESC_ID(state_val))  [in get_desc_state()]
    433  unsigned long id, struct prb_desc *desc_out,  [in desc_read(), argument]
    436  struct printk_info *info = to_info(desc_ring, id);  [in desc_read()]
    437  struct prb_desc *desc = to_desc(desc_ring, id);  [in desc_read()]
    [all …]
  printk_ringbuffer.h
    108  unsigned long id;  [member]
    127  #define DESC_SV(id, state) (((unsigned long)state << DESC_FLAGS_SHIFT) | id)  [argument]
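DESC_SV() above builds the printk ringbuffer's "state variable": a descriptor's state bits are packed into the top of the same unsigned long that carries its ID, so both travel through a single atomic load or cmpxchg. A minimal standalone sketch of the round trip; the DESC_STATE() helper and the exact 2-bit width are illustrative, chosen to mirror the header's layout:

    #include <assert.h>
    #include <stdio.h>

    #define DESC_SV_BITS       (sizeof(unsigned long) * 8)
    #define DESC_FLAGS_SHIFT   (DESC_SV_BITS - 2)          /* 2 state bits in the MSBs */
    #define DESC_FLAGS_MASK    (3UL << DESC_FLAGS_SHIFT)
    #define DESC_SV(id, state) (((unsigned long)(state) << DESC_FLAGS_SHIFT) | (id))
    #define DESC_ID(sv)        ((sv) & ~DESC_FLAGS_MASK)
    #define DESC_STATE(sv)     ((sv) >> DESC_FLAGS_SHIFT)  /* illustrative helper */

    int main(void)
    {
        unsigned long sv = DESC_SV(0x1234UL, 2UL);

        assert(DESC_ID(sv) == 0x1234UL);   /* the ID survives the packing */
        assert(DESC_STATE(sv) == 2UL);     /* so does the state           */
        printf("sv=%#lx id=%#lx state=%lu\n", sv, DESC_ID(sv), DESC_STATE(sv));
        return 0;
    }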
/kernel/livepatch/
  shadow.c
     58  unsigned long id;  [member]
     71  unsigned long id)  [in klp_shadow_match(), argument]
     73  return shadow->obj == obj && shadow->id == id;  [in klp_shadow_match()]
     83  void *klp_shadow_get(void *obj, unsigned long id)  [in klp_shadow_get(), argument]
     92  if (klp_shadow_match(shadow, obj, id)) {  [in klp_shadow_get()]
    104  static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id,  [in __klp_shadow_get_or_alloc(), argument]
    114  shadow_data = klp_shadow_get(obj, id);  [in __klp_shadow_get_or_alloc()]
    129  shadow_data = klp_shadow_get(obj, id);  [in __klp_shadow_get_or_alloc()]
    141  new_shadow->id = id;  [in __klp_shadow_get_or_alloc()]
    151  obj, id, err);  [in __klp_shadow_get_or_alloc()]
    [all …]
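Shadow variables key out-of-band data by the pair (object pointer, numeric ID), which lets a livepatch attach new fields to structures it cannot grow. A hedged sketch of typical usage; the counter, its ID value, and the ctor are invented for illustration:

    #include <linux/livepatch.h>

    #define SHADOW_CTR_ID 1  /* arbitrary ID, private to this patch */

    /* Hypothetical ctor: zero the counter when the shadow is first allocated. */
    static int shadow_ctr_ctor(void *obj, void *shadow_data, void *ctor_data)
    {
        *(int *)shadow_data = 0;
        return 0;
    }

    static void bump_counter(void *obj)
    {
        int *ctr;

        /* Allocates on first use; later calls return the existing copy. */
        ctr = klp_shadow_get_or_alloc(obj, SHADOW_CTR_ID, sizeof(*ctr),
                                      GFP_KERNEL, shadow_ctr_ctor, NULL);
        if (ctr)
            (*ctr)++;
    }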
  state.c
    16  for (state = patch->states; state && state->id; state++)
    31  struct klp_state *klp_get_state(struct klp_patch *patch, unsigned long id)  [in klp_get_state(), argument]
    36  if (state->id == id)  [in klp_get_state()]
    64  struct klp_state *klp_get_prev_state(unsigned long id)  [in klp_get_prev_state(), argument]
    76  state = klp_get_state(patch, id);  [in klp_get_prev_state()]
    92  state = klp_get_state(patch, old_state->id);  [in klp_is_state_compatible()]
/kernel/bpf/
  stackmap.c
    219  u32 hash, id, trace_nr, trace_len;  [in __bpf_get_stackid(), local]
    232  id = hash & (smap->n_buckets - 1);  [in __bpf_get_stackid()]
    233  bucket = READ_ONCE(smap->buckets[id]);  [in __bpf_get_stackid()]
    238  return id;  [in __bpf_get_stackid()]
    254  return id;  [in __bpf_get_stackid()]
    263  return id;  [in __bpf_get_stackid()]
    277  old_bucket = xchg(&smap->buckets[id], new_bucket);  [in __bpf_get_stackid()]
    280  return id;  [in __bpf_get_stackid()]
    582  u32 id = *(u32 *)key, trace_len;  [in bpf_stackmap_copy(), local]
    584  if (unlikely(id >= smap->n_buckets))  [in bpf_stackmap_copy()]
    [all …]
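Note how __bpf_get_stackid() turns the stack hash into a bucket index with a mask rather than a modulo; this is only valid because n_buckets is rounded up to a power of two. A standalone demonstration with arbitrary values:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t n_buckets = 1024;           /* must be a power of two */
        uint32_t hash = 0xdeadbeef;          /* arbitrary stack hash   */
        uint32_t id = hash & (n_buckets - 1);

        assert(id < n_buckets);              /* the mask replaces a modulo */
        printf("hash %#x -> bucket %u\n", hash, id);
        return 0;
    }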
  syscall.c
    379  int id;  [in bpf_map_alloc_id(), local]
    383  id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);  [in bpf_map_alloc_id()]
    384  if (id > 0)  [in bpf_map_alloc_id()]
    385  map->id = id;  [in bpf_map_alloc_id()]
    389  if (WARN_ON_ONCE(!id))  [in bpf_map_alloc_id()]
    392  return id > 0 ? 0 : id;  [in bpf_map_alloc_id()]
    404  if (!map->id)  [in bpf_map_free_id()]
    412  idr_remove(&map_idr, map->id);  [in bpf_map_free_id()]
    413  map->id = 0;  [in bpf_map_free_id()]
    728  map->id,  [in bpf_map_show_fdinfo()]
    [all …]
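bpf_map_alloc_id() relies on idr_alloc_cyclic(), which hands out IDs after the most recently allocated one before wrapping, so freed map IDs are not reused immediately. A hedged sketch of the same allocate/free pairing for a made-up object type; the lock and struct names are illustrative:

    #include <linux/idr.h>
    #include <linux/spinlock.h>

    static DEFINE_IDR(obj_idr);
    static DEFINE_SPINLOCK(obj_idr_lock);

    struct obj { int id; };

    /* Returns 0 on success, a negative errno otherwise. */
    static int obj_alloc_id(struct obj *o)
    {
        int id;

        idr_preload(GFP_KERNEL);
        spin_lock_bh(&obj_idr_lock);
        /* Cyclic allocation: prefer IDs after the last one handed out. */
        id = idr_alloc_cyclic(&obj_idr, o, 1, INT_MAX, GFP_ATOMIC);
        if (id > 0)
            o->id = id;
        spin_unlock_bh(&obj_idr_lock);
        idr_preload_end();

        return id > 0 ? 0 : id;
    }

    static void obj_free_id(struct obj *o)
    {
        if (!o->id)
            return;
        spin_lock_bh(&obj_idr_lock);
        idr_remove(&obj_idr, o->id);
        o->id = 0;
        spin_unlock_bh(&obj_idr_lock);
    }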
  btf.c
    237  u32 id;  [member]
    543  int id;  [in bpf_find_btf_id(), local]
    563  idr_for_each_entry(&btf_idr, btf, id) {  [in bpf_find_btf_id()]
    584  u32 id, u32 *res_id)  [in btf_type_skip_modifiers(), argument]
    586  const struct btf_type *t = btf_type_by_id(btf, id);  [in btf_type_skip_modifiers()]
    589  id = t->type;  [in btf_type_skip_modifiers()]
    594  *res_id = id;  [in btf_type_skip_modifiers()]
    600  u32 id, u32 *res_id)  [in btf_type_resolve_ptr(), argument]
    604  t = btf_type_skip_modifiers(btf, id, NULL);  [in btf_type_resolve_ptr()]
    612  u32 id, u32 *res_id)  [in btf_type_resolve_func_ptr(), argument]
    [all …]
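btf_type_skip_modifiers() peels const/volatile/typedef-style wrappers by repeatedly following t->type until a non-modifier kind is reached. The same walk over a toy type table, runnable standalone; the kinds and table here are invented, not real BTF encoding:

    #include <stdbool.h>
    #include <stdio.h>

    enum kind { KIND_INT, KIND_PTR, KIND_CONST, KIND_VOLATILE, KIND_TYPEDEF };

    struct toy_type { enum kind kind; unsigned type; /* next type id */ };

    /* Toy table: id 3 = const -> id 2 = typedef -> id 1 = int */
    static const struct toy_type types[] = {
        [1] = { KIND_INT, 0 },
        [2] = { KIND_TYPEDEF, 1 },
        [3] = { KIND_CONST, 2 },
    };

    static bool is_modifier(enum kind k)
    {
        return k == KIND_CONST || k == KIND_VOLATILE || k == KIND_TYPEDEF;
    }

    static unsigned skip_modifiers(unsigned id)
    {
        while (is_modifier(types[id].kind))
            id = types[id].type;   /* follow the link to the next id */
        return id;
    }

    int main(void)
    {
        printf("id 3 resolves to id %u\n", skip_modifiers(3)); /* -> 1 */
        return 0;
    }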
  cgroup_iter.c
    199  u64 id = linfo->cgroup.cgroup_id;  [in bpf_iter_attach_cgroup(), local]
    209  if (fd && id)  [in bpf_iter_attach_cgroup()]
    214  else if (id)  [in bpf_iter_attach_cgroup()]
    215  cgrp = cgroup_get_from_id(id);  [in bpf_iter_attach_cgroup()]
  disasm.c
    54  const char *func_id_name(int id)  [in func_id_name(), argument]
    56  if (id >= 0 && id < __BPF_FUNC_MAX_ID && func_id_str[id])  [in func_id_name()]
    57  return func_id_str[id];  [in func_id_name()]
/kernel/
  user_namespace.c
    237  u32 id; /* id to find */  [member]
    251  id2 = key->id + key->count - 1;  [in cmp_map_id()]
    261  if (key->id >= first && key->id <= last &&  [in cmp_map_id()]
    265  if (key->id < first || id2 < first)  [in cmp_map_id()]
    276  map_id_range_down_max(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)  [in map_id_range_down_max(), argument]
    282  key.id = id;  [in map_id_range_down_max()]
    294  map_id_range_down_base(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)  [in map_id_range_down_base(), argument]
    299  id2 = id + count - 1;  [in map_id_range_down_base()]
    305  if (id >= first && id <= last &&  [in map_id_range_down_base()]
    312  static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)  [in map_id_range_down(), argument]
    [all …]
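Each uid_gid_map extent maps the range [first, first + count) in the child namespace onto [lower_first, lower_first + count) in the parent; cmp_map_id() exists so large maps can locate the extent with bsearch(), and the translation itself is a plain offset. A runnable sketch of the linear lookup-plus-offset step; the fields mirror the kernel's struct uid_gid_extent, everything else is simplified:

    #include <stdint.h>
    #include <stdio.h>

    struct extent {
        uint32_t first;        /* start of range in this namespace */
        uint32_t lower_first;  /* start of range in the parent     */
        uint32_t count;
    };

    /* Linear variant of map_id_range_down(): (uint32_t)-1 means no match. */
    static uint32_t map_id_down(const struct extent *map, int nr, uint32_t id)
    {
        for (int i = 0; i < nr; i++) {
            const struct extent *e = &map[i];

            if (id >= e->first && id - e->first < e->count)
                return id - e->first + e->lower_first;
        }
        return (uint32_t)-1;
    }

    int main(void)
    {
        struct extent map[] = { { 0, 100000, 65536 } }; /* a typical userns map */

        printf("uid 1000 -> kuid %u\n", map_id_down(map, 1, 1000)); /* 101000 */
        return 0;
    }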
  watch_queue.c
    198  u64 id)  [in __post_watch_notification(), argument]
    212  if (watch->id != id)  [in __post_watch_notification()]
    462  if (wqueue == wq && watch->id == w->id)  [in add_one_watch()]
    525  u64 id, bool all)  [in remove_watch_from_object(), argument]
    538  (watch->id == id && rcu_access_pointer(watch->queue) == wq))  [in remove_watch_from_object()]
    555  n.id = id;  [in remove_watch_from_object()]
    556  if (id != 0)  [in remove_watch_from_object()]
/kernel/locking/
  qspinlock_stat.h
    40  int cpu, id, len;  [in lockevent_read(), local]
    46  id = (long)file_inode(file)->i_private;  [in lockevent_read()]
    48  if (id >= lockevent_num)  [in lockevent_read()]
    52  sum += per_cpu(lockevents[id], cpu);  [in lockevent_read()]
    56  switch (id) {  [in lockevent_read()]
    69  if (id == LOCKEVENT_pv_hash_hops) {  [in lockevent_read()]
    86  if ((id == LOCKEVENT_pv_latency_kick) ||  [in lockevent_read()]
    87  (id == LOCKEVENT_pv_latency_wake)) {  [in lockevent_read()]
  lock_events.c
    65  int cpu, id, len;  [in lockevent_read(), local]
    71  id = (long)file_inode(file)->i_private;  [in lockevent_read()]
    73  if (id >= lockevent_num)  [in lockevent_read()]
    77  sum += per_cpu(lockevents[id], cpu);  [in lockevent_read()]
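Both copies of lockevent_read() recover the event index from the debugfs file's i_private and then fold one slot of a per-cpu counter array across every possible CPU. A hedged kernel-style sketch of that summation pattern; NR_EVENTS and the array name are illustrative:

    #include <linux/percpu.h>

    #define NR_EVENTS 16

    static DEFINE_PER_CPU(unsigned long, events[NR_EVENTS]);

    static u64 sum_event(int id)
    {
        u64 sum = 0;
        int cpu;

        /* Each CPU updates only its own slot; reads tolerate tearing. */
        for_each_possible_cpu(cpu)
            sum += per_cpu(events[id], cpu);
        return sum;
    }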
/kernel/time/
  posix-clock.c
    199  static int get_clock_desc(const clockid_t id, struct posix_clock_desc *cd)  [in get_clock_desc(), argument]
    201  struct file *fp = fget(clockid_to_fd(id));  [in get_clock_desc()]
    226  static int pc_clock_adjtime(clockid_t id, struct __kernel_timex *tx)  [in pc_clock_adjtime(), argument]
    231  err = get_clock_desc(id, &cd);  [in pc_clock_adjtime()]
    250  static int pc_clock_gettime(clockid_t id, struct timespec64 *ts)  [in pc_clock_gettime(), argument]
    255  err = get_clock_desc(id, &cd);  [in pc_clock_gettime()]
    269  static int pc_clock_getres(clockid_t id, struct timespec64 *ts)  [in pc_clock_getres(), argument]
    274  err = get_clock_desc(id, &cd);  [in pc_clock_getres()]
    288  static int pc_clock_settime(clockid_t id, const struct timespec64 *ts)  [in pc_clock_settime(), argument]
    293  err = get_clock_desc(id, &cd);  [in pc_clock_settime()]
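clockid_to_fd() works because dynamic posix clocks are addressed through a file descriptor on a character device such as a PTP clock: the fd is folded into a negative clockid. Userspace applies the inverse encoding; the macros below follow the documented convention from the kernel's PTP test program and are defined locally since they are not in every libc:

    #include <fcntl.h>
    #include <stdio.h>
    #include <time.h>

    #define CLOCKFD 3
    #define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

    int main(void)
    {
        struct timespec ts;
        int fd = open("/dev/ptp0", O_RDONLY);  /* assumes a PTP device exists */

        if (fd < 0)
            return 1;
        if (clock_gettime(FD_TO_CLOCKID(fd), &ts) == 0)
            printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
        return 0;
    }

In the kernel, get_clock_desc() simply reverses this with fget(clockid_to_fd(id)) and dispatches through the clock's posix_clock_operations.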
  clockevents.c
    703  ret = __clockevents_try_unbind(iter, dev->id);  [in unbind_device_store()]
    713  ret = clockevents_unbind(ce, dev->id);  [in unbind_device_store()]
    722  .id = 0,
    729  &per_cpu(tick_cpu_device, dev->id);  [in tick_get_tick_dev()]
    743  return &per_cpu(tick_cpu_device, dev->id);  [in tick_get_tick_dev()]
    756  dev->id = cpu;  [in tick_init_sysfs()]
  posix-timers.c
      56  static const struct k_clock *clockid_to_kclock(const clockid_t id);
     120  timer_t id)  [in __posix_timers_find(), argument]
     126  if ((timer->it_signal == sig) && (timer->it_id == id))  [in __posix_timers_find()]
     132  static struct k_itimer *posix_timer_by_id(timer_t id)  [in posix_timer_by_id(), argument]
     135  struct hlist_head *head = &posix_timers_hashtable[hash(sig, id)];  [in posix_timer_by_id()]
     137  return __posix_timers_find(head, sig, id);  [in posix_timer_by_id()]
    1440  static const struct k_clock *clockid_to_kclock(const clockid_t id)  [in clockid_to_kclock(), argument]
    1442  clockid_t idx = id;  [in clockid_to_kclock()]
    1444  if (id < 0) {  [in clockid_to_kclock()]
    1445  return (id & CLOCKFD_MASK) == CLOCKFD ?  [in clockid_to_kclock()]
    [all …]
/kernel/cgroup/
  debug.c
     64  css = cset->subsys[ss->id];  [in current_css_set_read()]
     67  seq_printf(seq, "%2d: %-4s\t- %p[%d]\n", ss->id, ss->name,  [in current_css_set_read()]
     68  css, css->id);  [in current_css_set_read()]
    217  css = rcu_dereference_check(cgrp->subsys[ss->id], true);  [in cgroup_subsys_states_read()]
    226  css->parent->id);  [in cgroup_subsys_states_read()]
    227  seq_printf(seq, "%2d: %-4s\t- %p[%d] %d%s\n", ss->id, ss->name,  [in cgroup_subsys_states_read()]
    228  css, css->id,  [in cgroup_subsys_states_read()]
  cgroup.c
     336  static void *cgroup_idr_replace(struct idr *idr, void *ptr, int id)  [in cgroup_idr_replace(), argument]
     341  ret = idr_replace(idr, ptr, id);  [in cgroup_idr_replace()]
     346  static void cgroup_idr_remove(struct idr *idr, int id)  [in cgroup_idr_remove(), argument]
     349  idr_remove(idr, id);  [in cgroup_idr_remove()]
     489  return rcu_dereference_check(cgrp->subsys[ss->id],  [in cgroup_css()]
     539  while (!(cgroup_ss_mask(cgrp) & (1 << ss->id))) {  [in cgroup_e_css_by_mask()]
     577  return init_css_set.subsys[ss->id];  [in cgroup_e_css()]
     609  css = init_css_set.subsys[ss->id];  [in cgroup_get_e_css()]
     670  return rcu_dereference_raw(cgrp->subsys[cft->ss->id]);  [in of_css()]
    1332  int id;  [in cgroup_init_root_id(), local]
    [all …]
/kernel/trace/
  trace_export.c
     32  #define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn) \  [argument]
     33          FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))
     67  #define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \  [argument]
     80  #define FTRACE_ENTRY_DUP(name, struct_name, id, tstruct, print) \  [argument]
     81          FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))
    127  #define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \  [argument]
  trace.h
      93  #define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \  [argument]
     100  #define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)  [argument]
     103  #define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn) \  [argument]
     104          FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))
     107  #define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print) \  [argument]
     108          FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) __packed
     432  #define IF_ASSIGN(var, entry, etype, id) \  [argument]
     435          WARN_ON(id != 0 && (entry)->type != id); \
    1887  #define FTRACE_ENTRY(call, struct_name, id, tstruct, print) \  [argument]
    1891  #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print) \  [argument]
    [all …]
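The FTRACE_ENTRY family in trace.h and trace_export.c is a multi-pass X-macro: the macro is redefined before each inclusion of the event list, so one list of events expands once into type definitions and again into registration code. A minimal standalone illustration of the technique; the event list is invented:

    #include <stdio.h>

    /* The single source of truth: every event, listed once. */
    #define EVENT_LIST \
        EVENT(foo, 1)  \
        EVENT(bar, 2)

    /* Pass 1: expand the list into an enum of IDs. */
    #define EVENT(name, id) EV_##name = id,
    enum event_id { EVENT_LIST };
    #undef EVENT

    /* Pass 2: expand the same list into a name table. */
    #define EVENT(name, id) [id] = #name,
    static const char *event_names[] = { EVENT_LIST };
    #undef EVENT

    int main(void)
    {
        printf("%d -> %s\n", EV_bar, event_names[EV_bar]); /* 2 -> bar */
        return 0;
    }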
  trace_syscalls.c
    291  static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)  [in ftrace_syscall_enter(), argument]
    446  int id;  [in init_syscall_trace(), local]
    459  id = trace_event_raw_init(call);  [in init_syscall_trace()]
    461  if (id < 0) {  [in init_syscall_trace()]
    463  return id;  [in init_syscall_trace()]
    466  return id;  [in init_syscall_trace()]
    571  static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)  [in perf_syscall_enter(), argument]
/kernel/events/
  hw_breakpoint_test.c
    65  static void fill_one_bp_slot(struct kunit *test, int *id, int cpu, struct task_struct *tsk)  [in fill_one_bp_slot(), argument]
    67  struct perf_event *bp = register_test_bp(cpu, tsk, *id);  [in fill_one_bp_slot()]
    71  KUNIT_ASSERT_NULL(test, test_bps[*id]);  [in fill_one_bp_slot()]
    72  test_bps[(*id)++] = bp;  [in fill_one_bp_slot()]
    80  static bool fill_bp_slots(struct kunit *test, int *id, int cpu, struct task_struct *tsk, int skip)  [in fill_bp_slots(), argument]
    83  fill_one_bp_slot(test, id, cpu, tsk);  [in fill_bp_slots()]
    85  return *id + get_test_bp_slots() <= MAX_TEST_BREAKPOINTS;  [in fill_bp_slots()]
/kernel/bpf/preload/iterators/
  iterators.bpf.c
     16  __u32 id;  [member]
     41  __u32 id;  [member]
     89  BPF_SEQ_PRINTF(seq, "%4u %-16s%6d\n", map->id, map->name, map->max_entries);  [in dump_bpf_map()]
    108  BPF_SEQ_PRINTF(seq, "%4u %-16s %s %s\n", aux->id,  [in dump_bpf_prog()]
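This file is itself a BPF program: dump_bpf_map() runs once per map under the "iter/bpf_map" target and writes one row through the seq_file carried in its context. A hedged skeleton of such an iterator, mirroring the shape of the kernel's selftests; field access depends on vmlinux.h and CO-RE being available:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    char LICENSE[] SEC("license") = "GPL";

    SEC("iter/bpf_map")
    int dump_bpf_map(struct bpf_iter__bpf_map *ctx)
    {
        struct seq_file *seq = ctx->meta->seq;
        struct bpf_map *map = ctx->map;

        if (!map)   /* NULL signals the end of the iteration */
            return 0;

        BPF_SEQ_PRINTF(seq, "%4u %-16s %10u\n",
                       map->id, map->name, map->max_entries);
        return 0;
    }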
/kernel/debug/
  gdbstub.c
    405  static char *pack_threadid(char *pkt, unsigned char *id)  [in pack_threadid(), argument]
    410  limit = id + (BUF_THREAD_ID_SIZE / 2);  [in pack_threadid()]
    411  while (id < limit) {  [in pack_threadid()]
    412  if (!lzero || *id != 0) {  [in pack_threadid()]
    413  pkt = hex_byte_pack(pkt, *id);  [in pack_threadid()]
    416  id++;  [in pack_threadid()]
    425  static void int_to_threadref(unsigned char *id, int value)  [in int_to_threadref(), argument]
    427  put_unaligned_be32(value, id);  [in int_to_threadref()]
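pack_threadid() hex-encodes a fixed-width big-endian thread reference for the GDB remote protocol, skipping leading zero bytes but still emitting a single "00" when the whole ID is zero. A standalone rendition of the same loop:

    #include <stdio.h>

    static const char hex_asc[] = "0123456789abcdef";

    static char *hex_byte_pack(char *buf, unsigned char byte)
    {
        *buf++ = hex_asc[byte >> 4];
        *buf++ = hex_asc[byte & 0x0f];
        return buf;
    }

    /* Hex-encode a big-endian id, suppressing leading zero bytes. */
    static char *pack_threadid(char *pkt, const unsigned char *id, int len)
    {
        int lzero = 1;

        for (int i = 0; i < len; i++) {
            if (!lzero || id[i] != 0) {
                pkt = hex_byte_pack(pkt, id[i]);
                lzero = 0;
            }
        }
        if (lzero)                    /* the id was all zeroes */
            pkt = hex_byte_pack(pkt, 0);
        return pkt;
    }

    int main(void)
    {
        unsigned char id[4] = { 0, 0, 0x01, 0x2a }; /* thread 298 */
        char buf[16];
        char *end = pack_threadid(buf, id, sizeof(id));

        *end = '\0';
        puts(buf);  /* prints "012a" */
        return 0;
    }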
/kernel/sched/
  autogroup.c
     99  ag->id = atomic_inc_return(&autogroup_seq_nr);  [in autogroup_create()]
    277  seq_printf(m, "/autogroup-%ld nice %d\n", ag->id, ag->nice);  [in proc_sched_autogroup_show_task()]
    290  return snprintf(buf, buflen, "%s-%ld", "/autogroup", tg->autogroup->id);  [in autogroup_path()]
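autogroup_create() mints its ID from a global counter with atomic_inc_return(), a lock-free way to hand out unique, monotonically increasing IDs for the life of the system. A standalone equivalent using C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_long seq_nr;  /* zero-initialized at program start */

    /* Equivalent of atomic_inc_return(): returns the post-increment value. */
    static long new_id(void)
    {
        return atomic_fetch_add(&seq_nr, 1) + 1;
    }

    int main(void)
    {
        long a = new_id();
        long b = new_id();

        printf("%ld %ld\n", a, b);  /* 1 2 */
        return 0;
    }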