Searched refs:id (Results 1 – 25 of 50) sorted by relevance

/kernel/livepatch/
shadow.c
58 unsigned long id; member
71 unsigned long id) in klp_shadow_match() argument
73 return shadow->obj == obj && shadow->id == id; in klp_shadow_match()
83 void *klp_shadow_get(void *obj, unsigned long id) in klp_shadow_get() argument
92 if (klp_shadow_match(shadow, obj, id)) { in klp_shadow_get()
104 static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id, in __klp_shadow_get_or_alloc() argument
114 shadow_data = klp_shadow_get(obj, id); in __klp_shadow_get_or_alloc()
129 shadow_data = klp_shadow_get(obj, id); in __klp_shadow_get_or_alloc()
141 new_shadow->id = id; in __klp_shadow_get_or_alloc()
151 obj, id, err); in __klp_shadow_get_or_alloc()
[all …]
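
The shadow.c hits are the livepatch shadow-variable API: extra state is attached to an existing object and looked up by the <obj, id> pair that klp_shadow_match() compares. Below is a minimal sketch of how a livepatch callsite might use it; SHADOW_ID, the counter payload, and the surrounding functions are hypothetical, while the klp_shadow_* calls follow the declarations in include/linux/livepatch.h.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/livepatch.h>

#define SHADOW_ID 1	/* hypothetical shadow-variable id picked by the patch */

/* Attach an extra counter to an existing object without changing its layout. */
static int patched_use_object(void *obj)
{
	int *counter;

	/* look up, or allocate on first use, the shadow data for <obj, SHADOW_ID> */
	counter = klp_shadow_get_or_alloc(obj, SHADOW_ID, sizeof(*counter),
					  GFP_KERNEL, NULL, NULL);
	if (!counter)
		return -ENOMEM;

	(*counter)++;
	return 0;
}

/* Called from the patched teardown path so the shadow data dies with the object. */
static void patched_release_object(void *obj)
{
	klp_shadow_free(obj, SHADOW_ID, NULL);
}
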
/kernel/bpf/
stackmap.c
356 u32 hash, id, trace_nr, trace_len; in BPF_CALL_3() local
386 id = hash & (smap->n_buckets - 1); in BPF_CALL_3()
387 bucket = READ_ONCE(smap->buckets[id]); in BPF_CALL_3()
392 return id; in BPF_CALL_3()
408 return id; in BPF_CALL_3()
417 return id; in BPF_CALL_3()
431 old_bucket = xchg(&smap->buckets[id], new_bucket); in BPF_CALL_3()
434 return id; in BPF_CALL_3()
524 u32 id = *(u32 *)key, trace_len; in bpf_stackmap_copy() local
526 if (unlikely(id >= smap->n_buckets)) in bpf_stackmap_copy()
[all …]
syscall.c
259 int id; in bpf_map_alloc_id() local
263 id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC); in bpf_map_alloc_id()
264 if (id > 0) in bpf_map_alloc_id()
265 map->id = id; in bpf_map_alloc_id()
269 if (WARN_ON_ONCE(!id)) in bpf_map_alloc_id()
272 return id > 0 ? 0 : id; in bpf_map_alloc_id()
284 if (!map->id) in bpf_map_free_id()
292 idr_remove(&map_idr, map->id); in bpf_map_free_id()
293 map->id = 0; in bpf_map_free_id()
400 map->id, in bpf_map_show_fdinfo()
[all …]
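
bpf_map_alloc_id()/bpf_map_free_id() above are the stock IDR recipe for handing out small cyclic IDs under a spinlock with preloading. A self-contained sketch of the same recipe for a hypothetical object type follows; every name here is illustrative, not BPF's.

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>

static DEFINE_IDR(obj_idr);
static DEFINE_SPINLOCK(obj_idr_lock);

struct my_obj {
	int id;
};

static int my_obj_alloc_id(struct my_obj *obj)
{
	int id;

	idr_preload(GFP_KERNEL);		/* preallocate memory outside the lock */
	spin_lock_bh(&obj_idr_lock);
	id = idr_alloc_cyclic(&obj_idr, obj, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		obj->id = id;
	spin_unlock_bh(&obj_idr_lock);
	idr_preload_end();

	return id > 0 ? 0 : id;			/* 0 on success, negative errno otherwise */
}

static void my_obj_free_id(struct my_obj *obj)
{
	if (!obj->id)
		return;

	spin_lock_bh(&obj_idr_lock);
	idr_remove(&obj_idr, obj->id);
	obj->id = 0;
	spin_unlock_bh(&obj_idr_lock);
}
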
disasm.c
47 const char *func_id_name(int id) in func_id_name() argument
49 if (id >= 0 && id < __BPF_FUNC_MAX_ID && func_id_str[id]) in func_id_name()
50 return func_id_str[id]; in func_id_name()
verifier.c
457 verbose(env, "(id=%d", reg->id); in print_verifier_state()
529 if (state->acquired_refs && state->refs[0].id) { in print_verifier_state()
530 verbose(env, " refs=%d", state->refs[0].id); in print_verifier_state()
532 if (state->refs[i].id) in print_verifier_state()
533 verbose(env, ",%d", state->refs[i].id); in print_verifier_state()
624 int id, err; in acquire_reference_state() local
629 id = ++env->id_gen; in acquire_reference_state()
630 state->refs[new_ofs].id = id; in acquire_reference_state()
633 return id; in acquire_reference_state()
643 if (state->refs[i].id == ptr_id) { in release_reference_state()
[all …]
btf.c
213 u32 id; member
632 u32 id, int_data; in btf_member_is_reg_int() local
635 id = m->type; in btf_member_is_reg_int()
636 t = btf_type_id_size(btf, &id, NULL); in btf_member_is_reg_int()
863 int id; in btf_alloc_id() local
867 id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC); in btf_alloc_id()
868 if (id > 0) in btf_alloc_id()
869 btf->id = id; in btf_alloc_id()
873 if (WARN_ON_ONCE(!id)) in btf_alloc_id()
876 return id > 0 ? 0 : id; in btf_alloc_id()
[all …]
disasm.h
20 const char *func_id_name(int id);
/kernel/
user_namespace.c
215 u32 id; /* id to find */ member
229 id2 = key->id + key->count - 1; in cmp_map_id()
239 if (key->id >= first && key->id <= last && in cmp_map_id()
243 if (key->id < first || id2 < first) in cmp_map_id()
254 map_id_range_down_max(unsigned extents, struct uid_gid_map *map, u32 id, u32 count) in map_id_range_down_max() argument
260 key.id = id; in map_id_range_down_max()
272 map_id_range_down_base(unsigned extents, struct uid_gid_map *map, u32 id, u32 count) in map_id_range_down_base() argument
277 id2 = id + count - 1; in map_id_range_down_base()
283 if (id >= first && id <= last && in map_id_range_down_base()
290 static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count) in map_id_range_down() argument
[all …]
seccomp.c
56 u64 id; member
751 n.id = seccomp_next_notify_id(match); in seccomp_do_user_notification()
1049 unotif.id = knotif->id; in seccomp_notify_recv()
1071 if (cur->id == unotif.id) { in seccomp_notify_recv()
1105 if (cur->id == resp.id) { in seccomp_notify_send()
1136 u64 id; in seccomp_notify_id_valid() local
1139 if (copy_from_user(&id, buf, sizeof(id))) in seccomp_notify_id_valid()
1148 if (knotif->id == id) { in seccomp_notify_id_valid()
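
The seccomp.c hits are the 64-bit cookie that ties a user-notification request to the blocked task; seccomp_notify_id_valid() is what backs the SECCOMP_IOCTL_NOTIF_ID_VALID ioctl. A user-space sketch of re-checking a cookie before acting on the target's behalf is below; it assumes listener_fd came from a filter installed with SECCOMP_FILTER_FLAG_NEW_LISTENER and that the uapi headers (v5.0+) define the ioctl.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/seccomp.h>

/*
 * After SECCOMP_IOCTL_NOTIF_RECV hands back a struct seccomp_notif, the
 * supervisor can re-check that the target is still blocked in the same
 * syscall before acting on its behalf; the kernel compares knotif->id.
 * Returns 0 if the cookie is still live, -1 (ENOENT) if the target is gone.
 */
static int notif_id_still_valid(int listener_fd, uint64_t id)
{
	return ioctl(listener_fd, SECCOMP_IOCTL_NOTIF_ID_VALID, &id);
}
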
workqueue.c
151 int id; /* I: pool ID */ member
546 pool->id = ret; in worker_pool_assign_id()
742 (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id; in get_work_pool_id()
1296 set_work_pool_and_keep_pending(work, pool->id); in try_to_grab_pending()
1910 int id = -1; in create_worker() local
1914 id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL); in create_worker()
1915 if (id < 0) in create_worker()
1922 worker->id = id; in create_worker()
1925 snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id, in create_worker()
1928 snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id); in create_worker()
[all …]
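
create_worker() above draws its per-pool worker id from an IDA and folds it into the kworker name. A short sketch of that allocate/name/release pattern for a hypothetical pool; ida_simple_get()/ida_simple_remove() are the calls the hit uses (newer kernels spell them ida_alloc()/ida_free()), and the naming scheme is made up.

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/kernel.h>

static DEFINE_IDA(my_worker_ida);

static int new_worker_id(char *name, size_t len)
{
	/* smallest free id >= 0; an 'end' of 0 means no upper bound */
	int id = ida_simple_get(&my_worker_ida, 0, 0, GFP_KERNEL);

	if (id < 0)
		return id;

	snprintf(name, len, "my_worker/%d", id);	/* hypothetical naming scheme */
	return id;
}

static void drop_worker_id(int id)
{
	ida_simple_remove(&my_worker_ida, id);
}
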
/kernel/locking/
qspinlock_stat.h
40 int cpu, id, len; in lockevent_read() local
46 id = (long)file_inode(file)->i_private; in lockevent_read()
48 if (id >= lockevent_num) in lockevent_read()
52 sum += per_cpu(lockevents[id], cpu); in lockevent_read()
56 switch (id) { in lockevent_read()
69 if (id == LOCKEVENT_pv_hash_hops) { in lockevent_read()
86 if ((id == LOCKEVENT_pv_latency_kick) || in lockevent_read()
87 (id == LOCKEVENT_pv_latency_wake)) { in lockevent_read()
lock_events.c
65 int cpu, id, len; in lockevent_read() local
71 id = (long)file_inode(file)->i_private; in lockevent_read()
73 if (id >= lockevent_num) in lockevent_read()
77 sum += per_cpu(lockevents[id], cpu); in lockevent_read()
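
Both lockevent_read() variants simply sum one slot of a per-cpu counter array across every possible CPU. The generic shape of that read side, sketched for a single hypothetical per-cpu counter:

#include <linux/cpumask.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, my_event_count);

/* Fold the per-CPU slots into one total; readers tolerate a racy snapshot. */
static unsigned long my_event_sum(void)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu(my_event_count, cpu);

	return sum;
}
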
/kernel/time/
posix-clock.c
199 static int get_clock_desc(const clockid_t id, struct posix_clock_desc *cd) in get_clock_desc() argument
201 struct file *fp = fget(clockid_to_fd(id)); in get_clock_desc()
226 static int pc_clock_adjtime(clockid_t id, struct __kernel_timex *tx) in pc_clock_adjtime() argument
231 err = get_clock_desc(id, &cd); in pc_clock_adjtime()
250 static int pc_clock_gettime(clockid_t id, struct timespec64 *ts) in pc_clock_gettime() argument
255 err = get_clock_desc(id, &cd); in pc_clock_gettime()
269 static int pc_clock_getres(clockid_t id, struct timespec64 *ts) in pc_clock_getres() argument
274 err = get_clock_desc(id, &cd); in pc_clock_getres()
288 static int pc_clock_settime(clockid_t id, const struct timespec64 *ts) in pc_clock_settime() argument
293 err = get_clock_desc(id, &cd); in pc_clock_settime()
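
get_clock_desc() above turns a clockid back into a struct file, which is how dynamic POSIX clocks (PTP hardware clocks, for example) are addressed: user space folds an open fd into a negative clockid. A user-space sketch follows; CLOCKFD/FD_TO_CLOCKID are spelled out locally the way the kernel's own testptp example does, and /dev/ptp0 is just an example device node.

#include <fcntl.h>
#include <stdio.h>
#include <time.h>

/* dynamic-clock encoding: the inverted fd lives in the upper bits, CLOCKFD in the low three */
#define CLOCKFD			3
#define FD_TO_CLOCKID(fd)	((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
	struct timespec ts;
	int fd = open("/dev/ptp0", O_RDWR);	/* example dynamic clock device */

	if (fd < 0)
		return 1;

	/* lands in pc_clock_gettime() after clockid_to_fd() recovers the fd */
	if (clock_gettime(FD_TO_CLOCKID(fd), &ts) == 0)
		printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);

	return 0;
}
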
posix-timers.c
55 static const struct k_clock *clockid_to_kclock(const clockid_t id);
119 timer_t id) in __posix_timers_find() argument
124 if ((timer->it_signal == sig) && (timer->it_id == id)) in __posix_timers_find()
130 static struct k_itimer *posix_timer_by_id(timer_t id) in posix_timer_by_id() argument
133 struct hlist_head *head = &posix_timers_hashtable[hash(sig, id)]; in posix_timer_by_id()
135 return __posix_timers_find(head, sig, id); in posix_timer_by_id()
1357 static const struct k_clock *clockid_to_kclock(const clockid_t id) in clockid_to_kclock() argument
1359 clockid_t idx = id; in clockid_to_kclock()
1361 if (id < 0) { in clockid_to_kclock()
1362 return (id & CLOCKFD_MASK) == CLOCKFD ? in clockid_to_kclock()
[all …]
clockevents.c
704 ret = __clockevents_try_unbind(ce, dev->id); in sysfs_unbind_tick_dev()
713 ret = clockevents_unbind(ce, dev->id); in sysfs_unbind_tick_dev()
722 .id = 0,
729 &per_cpu(tick_cpu_device, dev->id); in tick_get_tick_dev()
743 return &per_cpu(tick_cpu_device, dev->id); in tick_get_tick_dev()
756 dev->id = cpu; in tick_init_sysfs()
/kernel/cgroup/
debug.c
64 css = cset->subsys[ss->id]; in current_css_set_read()
67 seq_printf(seq, "%2d: %-4s\t- %p[%d]\n", ss->id, ss->name, in current_css_set_read()
68 css, css->id); in current_css_set_read()
217 css = rcu_dereference_check(cgrp->subsys[ss->id], true); in cgroup_subsys_states_read()
226 css->parent->id); in cgroup_subsys_states_read()
227 seq_printf(seq, "%2d: %-4s\t- %p[%d] %d%s\n", ss->id, ss->name, in cgroup_subsys_states_read()
228 css, css->id, in cgroup_subsys_states_read()
cgroup.c
317 static void *cgroup_idr_replace(struct idr *idr, void *ptr, int id) in cgroup_idr_replace() argument
322 ret = idr_replace(idr, ptr, id); in cgroup_idr_replace()
327 static void cgroup_idr_remove(struct idr *idr, int id) in cgroup_idr_remove() argument
330 idr_remove(idr, id); in cgroup_idr_remove()
470 return rcu_dereference_check(cgrp->subsys[ss->id], in cgroup_css()
520 while (!(cgroup_ss_mask(cgrp) & (1 << ss->id))) { in cgroup_e_css_by_mask()
555 return init_css_set.subsys[ss->id]; in cgroup_e_css()
584 css = init_css_set.subsys[ss->id]; in cgroup_get_e_css()
644 return rcu_dereference_raw(cgrp->subsys[cft->ss->id]); in of_css()
1291 int id; in cgroup_init_root_id() local
[all …]
/kernel/trace/
trace_export.c
32 #define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, \ argument
34 FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
63 #define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter) \ argument
76 #define FTRACE_ENTRY_DUP(name, struct_name, id, tstruct, print, filter) \ argument
77 FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
134 #define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter) \ argument
trace.h
71 #define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter) \ argument
78 #define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter) argument
81 #define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, \ argument
83 FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
87 #define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print, \ argument
89 FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
369 #define IF_ASSIGN(var, entry, etype, id) \ argument
372 WARN_ON(id != 0 && (entry)->type != id); \
1902 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \ argument
1906 #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \ argument
[all …]
trace_syscalls.c
308 static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) in ftrace_syscall_enter() argument
482 int id; in init_syscall_trace() local
495 id = trace_event_raw_init(call); in init_syscall_trace()
497 if (id < 0) { in init_syscall_trace()
499 return id; in init_syscall_trace()
502 return id; in init_syscall_trace()
583 static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) in perf_syscall_enter() argument
trace_entries.h
252 __field( unsigned int, id )
257 __entry->id, (int)__entry->buf[0]),
/kernel/debug/
gdbstub.c
408 static char *pack_threadid(char *pkt, unsigned char *id) in pack_threadid() argument
413 limit = id + (BUF_THREAD_ID_SIZE / 2); in pack_threadid()
414 while (id < limit) { in pack_threadid()
415 if (!lzero || *id != 0) { in pack_threadid()
416 pkt = hex_byte_pack(pkt, *id); in pack_threadid()
419 id++; in pack_threadid()
428 static void int_to_threadref(unsigned char *id, int value) in int_to_threadref() argument
430 put_unaligned_be32(value, id); in int_to_threadref()
/kernel/sched/
autogroup.c
77 ag->id = atomic_inc_return(&autogroup_seq_nr); in autogroup_create()
254 seq_printf(m, "/autogroup-%ld nice %d\n", ag->id, ag->nice); in proc_sched_autogroup_show_task()
267 return snprintf(buf, buflen, "%s-%ld", "/autogroup", tg->autogroup->id); in autogroup_path()
/kernel/irq/
timings.c
603 int id; in irq_timings_alloc() local
620 id = idr_alloc(&irqt_stats, s, irq, irq + 1, GFP_NOWAIT); in irq_timings_alloc()
623 if (id < 0) { in irq_timings_alloc()
625 return id; in irq_timings_alloc()
affinity.c
99 unsigned id; member
138 node_vectors[n].id = n; in alloc_nodes_vectors()
298 cpumask_and(nmsk, cpu_mask, node_to_cpumask[nv->id]); in __irq_build_affinity_masks()
